repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
---|---|---|---|---|---|---|
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.net.URI;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtilClient;
/** Interface that represents the over-the-wire information for a file.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class HdfsFileStatus {
private final byte[] path; // local name of the inode that's encoded in java UTF8
private final byte[] symlink; // symlink target encoded in java UTF8 or null
private final long length;
private final boolean isdir;
private final short block_replication;
private final long blocksize;
private final long modification_time;
private final long access_time;
private final FsPermission permission;
private final String owner;
private final String group;
private final long fileId;
private final FileEncryptionInfo feInfo;
// Used by dir, not including dot and dotdot. Always zero for a regular file.
private final int childrenNum;
private final byte storagePolicy;
public static final byte[] EMPTY_NAME = new byte[0];
/**
* Constructor.
* @param length the number of bytes the file has
* @param isdir if the path is a directory
* @param block_replication the replication factor
* @param blocksize the block size
* @param modification_time modification time
* @param access_time access time
* @param permission permission
* @param owner the owner of the path
* @param group the group of the path
* @param symlink the symlink target encoded in java UTF8, or null if not a symlink
* @param path the local name in java UTF8 encoding, the same as that in memory
* @param fileId the file id
* @param childrenNum the number of children of a directory (always zero for a regular file)
* @param feInfo the file's encryption info
* @param storagePolicy the storage policy id
*/
public HdfsFileStatus(long length, boolean isdir, int block_replication,
long blocksize, long modification_time, long access_time,
FsPermission permission, String owner, String group, byte[] symlink,
byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo,
byte storagePolicy) {
this.length = length;
this.isdir = isdir;
this.block_replication = (short)block_replication;
this.blocksize = blocksize;
this.modification_time = modification_time;
this.access_time = access_time;
this.permission = (permission == null) ?
((isdir || symlink!=null) ?
FsPermission.getDefault() :
FsPermission.getFileDefault()) :
permission;
this.owner = (owner == null) ? "" : owner;
this.group = (group == null) ? "" : group;
this.symlink = symlink;
this.path = path;
this.fileId = fileId;
this.childrenNum = childrenNum;
this.feInfo = feInfo;
this.storagePolicy = storagePolicy;
}
/**
* Get the length of this file, in bytes.
* @return the length of this file, in bytes.
*/
public final long getLen() {
return length;
}
/**
* Is this a directory?
* @return true if this is a directory
*/
public final boolean isDir() {
return isdir;
}
/**
* Is this a symbolic link?
* @return true if this is a symbolic link
*/
public boolean isSymlink() {
return symlink != null;
}
/**
* Get the block size of the file.
* @return the number of bytes
*/
public final long getBlockSize() {
return blocksize;
}
/**
* Get the replication factor of a file.
* @return the replication factor of a file.
*/
public final short getReplication() {
return block_replication;
}
/**
* Get the modification time of the file.
* @return the modification time of the file in milliseconds since January 1, 1970 UTC.
*/
public final long getModificationTime() {
return modification_time;
}
/**
* Get the access time of the file.
* @return the access time of the file in milliseconds since January 1, 1970 UTC.
*/
public final long getAccessTime() {
return access_time;
}
/**
* Get FsPermission associated with the file.
* @return permission
*/
public final FsPermission getPermission() {
return permission;
}
/**
* Get the owner of the file.
* @return owner of the file
*/
public final String getOwner() {
return owner;
}
/**
* Get the group associated with the file.
* @return group for the file.
*/
public final String getGroup() {
return group;
}
/**
* Check if the local name is empty
* @return true if the name is empty
*/
public final boolean isEmptyLocalName() {
return path.length == 0;
}
/**
* Get the string representation of the local name
* @return the local name in string
*/
public final String getLocalName() {
return DFSUtilClient.bytes2String(path);
}
/**
* Get the Java UTF8 representation of the local name
* @return the local name in java UTF8
*/
public final byte[] getLocalNameInBytes() {
return path;
}
/**
* Get the string representation of the full path name
* @param parent the parent path
* @return the full path in string
*/
public final String getFullName(final String parent) {
if (isEmptyLocalName()) {
return parent;
}
StringBuilder fullName = new StringBuilder(parent);
if (!parent.endsWith(Path.SEPARATOR)) {
fullName.append(Path.SEPARATOR);
}
fullName.append(getLocalName());
return fullName.toString();
}
/**
* Get the full path
* @param parent the parent path
* @return the full path
*/
public final Path getFullPath(final Path parent) {
if (isEmptyLocalName()) {
return parent;
}
return new Path(parent, getLocalName());
}
/**
* Get the string representation of the symlink.
* @return the symlink as a string.
*/
public final String getSymlink() {
return DFSUtilClient.bytes2String(symlink);
}
public final byte[] getSymlinkInBytes() {
return symlink;
}
public final long getFileId() {
return fileId;
}
public final FileEncryptionInfo getFileEncryptionInfo() {
return feInfo;
}
public final int getChildrenNum() {
return childrenNum;
}
/** @return the storage policy id */
public final byte getStoragePolicy() {
return storagePolicy;
}
public final FileStatus makeQualified(URI defaultUri, Path path) {
return new FileStatus(getLen(), isDir(), getReplication(),
getBlockSize(), getModificationTime(),
getAccessTime(),
getPermission(), getOwner(), getGroup(),
isSymlink() ? new Path(getSymlink()) : null,
(getFullPath(path)).makeQualified(
defaultUri, null)); // fully-qualify path
}
}
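A minimal usage sketch for the class above, not taken from the Hadoop sources: it builds an HdfsFileStatus and resolves it into a qualified FileStatus via makeQualified. The namenode URI, parent path, owner, group, and id values are illustrative assumptions.

import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class HdfsFileStatusExample {
  public static void main(String[] args) {
    HdfsFileStatus status = new HdfsFileStatus(
        1024L,                          // length in bytes
        false,                          // not a directory
        3,                              // replication factor
        128L * 1024 * 1024,             // block size
        0L, 0L,                         // modification and access time
        new FsPermission((short) 0644),
        "alice", "users",               // owner and group (assumed values)
        null,                           // not a symlink
        "data.txt".getBytes(StandardCharsets.UTF_8), // local name
        16387L,                         // file id (assumed)
        0,                              // children (always zero for a file)
        null,                           // no encryption info
        (byte) 0);                      // storage policy id
    System.out.println(status.getFullName("/user/alice"));   // /user/alice/data.txt
    FileStatus fs = status.makeQualified(
        URI.create("hdfs://nn1:8020"), new Path("/user/alice"));
    System.out.println(fs.getPath());   // hdfs://nn1:8020/user/alice/data.txt
  }
}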
| 7,582 | 26.878676 | 85 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeInfo.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.util.Date;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Rolling upgrade information
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class RollingUpgradeInfo extends RollingUpgradeStatus {
private final long startTime;
private long finalizeTime;
private boolean createdRollbackImages;
public RollingUpgradeInfo(String blockPoolId, boolean createdRollbackImages,
long startTime, long finalizeTime) {
super(blockPoolId, finalizeTime != 0);
this.createdRollbackImages = createdRollbackImages;
this.startTime = startTime;
this.finalizeTime = finalizeTime;
}
public boolean createdRollbackImages() {
return createdRollbackImages;
}
public void setCreatedRollbackImages(boolean created) {
this.createdRollbackImages = created;
}
public boolean isStarted() {
return startTime != 0;
}
/** @return The rolling upgrade starting time. */
public long getStartTime() {
return startTime;
}
@Override
public boolean isFinalized() {
return finalizeTime != 0;
}
/**
* Finalize the upgrade if not already finalized.
* @param finalizeTime the time at which the upgrade is finalized; ignored if zero
*/
public void finalize(long finalizeTime) {
if (finalizeTime != 0) {
this.finalizeTime = finalizeTime;
createdRollbackImages = false;
}
}
public long getFinalizeTime() {
return finalizeTime;
}
@Override
public int hashCode() {
//only use lower 32 bits
return super.hashCode() ^ (int)startTime ^ (int)finalizeTime;
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
} else if (obj == null || !(obj instanceof RollingUpgradeInfo)) {
return false;
}
final RollingUpgradeInfo that = (RollingUpgradeInfo)obj;
return super.equals(that)
&& this.startTime == that.startTime
&& this.finalizeTime == that.finalizeTime;
}
@Override
public String toString() {
return super.toString()
+ "\n Start Time: " + (startTime == 0? "<NOT STARTED>": timestamp2String(startTime))
+ "\n Finalize Time: " + (finalizeTime == 0? "<NOT FINALIZED>": timestamp2String(finalizeTime));
}
private static String timestamp2String(long timestamp) {
return new Date(timestamp) + " (=" + timestamp + ")";
}
public static class Bean {
private final String blockPoolId;
private final long startTime;
private final long finalizeTime;
private final boolean createdRollbackImages;
public Bean(RollingUpgradeInfo f) {
this.blockPoolId = f.getBlockPoolId();
this.startTime = f.startTime;
this.finalizeTime = f.finalizeTime;
this.createdRollbackImages = f.createdRollbackImages();
}
public String getBlockPoolId() {
return blockPoolId;
}
public long getStartTime() {
return startTime;
}
public long getFinalizeTime() {
return finalizeTime;
}
public boolean isCreatedRollbackImages() {
return createdRollbackImages;
}
}
}
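A short, hedged sketch of the life cycle shown above (start, then finalize); the block pool id and timestamps are made-up values, not from the source.

import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;

public class RollingUpgradeInfoExample {
  public static void main(String[] args) {
    // An upgrade that has started but is not yet finalized (finalizeTime == 0).
    RollingUpgradeInfo info = new RollingUpgradeInfo(
        "BP-1234-127.0.0.1-1000", true, System.currentTimeMillis(), 0);
    System.out.println(info.isStarted());     // true
    System.out.println(info.isFinalized());   // false
    // Finalizing records the time and clears the rollback-image flag.
    info.finalize(System.currentTimeMillis());
    System.out.println(info.isFinalized());             // true
    System.out.println(info.createdRollbackImages());   // false
    System.out.println(info);
  }
}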
| 3,936 | 27.121429 | 104 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolStats.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* CachePoolStats describes cache pool statistics.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class CachePoolStats {
public static class Builder {
private long bytesNeeded;
private long bytesCached;
private long bytesOverlimit;
private long filesNeeded;
private long filesCached;
public Builder() {
}
public Builder setBytesNeeded(long bytesNeeded) {
this.bytesNeeded = bytesNeeded;
return this;
}
public Builder setBytesCached(long bytesCached) {
this.bytesCached = bytesCached;
return this;
}
public Builder setBytesOverlimit(long bytesOverlimit) {
this.bytesOverlimit = bytesOverlimit;
return this;
}
public Builder setFilesNeeded(long filesNeeded) {
this.filesNeeded = filesNeeded;
return this;
}
public Builder setFilesCached(long filesCached) {
this.filesCached = filesCached;
return this;
}
public CachePoolStats build() {
return new CachePoolStats(bytesNeeded, bytesCached, bytesOverlimit,
filesNeeded, filesCached);
}
};
private final long bytesNeeded;
private final long bytesCached;
private final long bytesOverlimit;
private final long filesNeeded;
private final long filesCached;
private CachePoolStats(long bytesNeeded, long bytesCached,
long bytesOverlimit, long filesNeeded, long filesCached) {
this.bytesNeeded = bytesNeeded;
this.bytesCached = bytesCached;
this.bytesOverlimit = bytesOverlimit;
this.filesNeeded = filesNeeded;
this.filesCached = filesCached;
}
public long getBytesNeeded() {
return bytesNeeded;
}
public long getBytesCached() {
return bytesCached;
}
public long getBytesOverlimit() {
return bytesOverlimit;
}
public long getFilesNeeded() {
return filesNeeded;
}
public long getFilesCached() {
return filesCached;
}
public String toString() {
return new StringBuilder().append("{").
append("bytesNeeded:").append(bytesNeeded).
append(", bytesCached:").append(bytesCached).
append(", bytesOverlimit:").append(bytesOverlimit).
append(", filesNeeded:").append(filesNeeded).
append(", filesCached:").append(filesCached).
append("}").toString();
}
}
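A hedged builder sketch for the stats class above; the byte and file counts are arbitrary illustrative numbers.

import org.apache.hadoop.hdfs.protocol.CachePoolStats;

public class CachePoolStatsExample {
  public static void main(String[] args) {
    CachePoolStats stats = new CachePoolStats.Builder()
        .setBytesNeeded(4L * 1024 * 1024)   // 4 MB requested by directives
        .setBytesCached(3L * 1024 * 1024)   // 3 MB actually cached
        .setBytesOverlimit(0)
        .setFilesNeeded(10)
        .setFilesCached(8)
        .build();
    System.out.println(stats.getBytesCached()); // 3145728
    System.out.println(stats);
  }
}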
| 3,271 | 27.206897 | 75 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotAccessControlException.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.security.AccessControlException;
/** Snapshot access related exception. */
public class SnapshotAccessControlException extends AccessControlException {
private static final long serialVersionUID = 1L;
public SnapshotAccessControlException(final String message) {
super(message);
}
public SnapshotAccessControlException(final Throwable cause) {
super(cause);
}
}
| 1,257 | 36 | 76 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveEntry.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Describes a path-based cache directive entry.
*/
@InterfaceStability.Evolving
@InterfaceAudience.Public
public class CacheDirectiveEntry {
private final CacheDirectiveInfo info;
private final CacheDirectiveStats stats;
public CacheDirectiveEntry(CacheDirectiveInfo info,
CacheDirectiveStats stats) {
this.info = info;
this.stats = stats;
}
public CacheDirectiveInfo getInfo() {
return info;
}
public CacheDirectiveStats getStats() {
return stats;
}
};
| 1,470 | 30.978261 | 75 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.long2String;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DSQuotaExceededException extends QuotaExceededException {
protected static final long serialVersionUID = 1L;
public DSQuotaExceededException() {}
public DSQuotaExceededException(String msg) {
super(msg);
}
public DSQuotaExceededException(long quota, long count) {
super(quota, count);
}
@Override
public String getMessage() {
String msg = super.getMessage();
if (msg == null) {
return "The DiskSpace quota" + (pathName==null?"": " of " + pathName)
+ " is exceeded: quota = " + quota + " B = " + long2String(quota, "B", 2)
+ " but diskspace consumed = " + count + " B = " + long2String(count, "B", 2);
} else {
return msg;
}
}
}
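A small sketch of how the message above is assembled; the quota, usage, and path are assumed example values.

import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;

public class QuotaMessageExample {
  public static void main(String[] args) {
    // A 1 GB disk-space quota with 1.5 GB consumed.
    DSQuotaExceededException e = new DSQuotaExceededException(
        1024L * 1024 * 1024, 1536L * 1024 * 1024);
    e.setPathName("/user/alice/warehouse");
    // getMessage() reports both raw byte counts and human-readable sizes.
    System.out.println(e.getMessage());
  }
}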
| 1,831 | 34.230769 | 88 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeStatus.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Rolling upgrade status
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class RollingUpgradeStatus {
private final String blockPoolId;
private final boolean finalized;
public RollingUpgradeStatus(String blockPoolId, boolean finalized) {
this.blockPoolId = blockPoolId;
this.finalized = finalized;
}
public String getBlockPoolId() {
return blockPoolId;
}
public boolean isFinalized() {
return finalized;
}
@Override
public int hashCode() {
return blockPoolId.hashCode();
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
} else if (obj == null || !(obj instanceof RollingUpgradeStatus)) {
return false;
}
final RollingUpgradeStatus that = (RollingUpgradeStatus) obj;
return this.blockPoolId.equals(that.blockPoolId)
&& this.isFinalized() == that.isFinalized();
}
@Override
public String toString() {
return " Block Pool ID: " + blockPoolId;
}
}
| 1,973 | 28.462687 | 75 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This exception is thrown when modification to HDFS results in violation
* of a directory quota. A directory quota might be namespace quota (limit
* on number of files and directories) or a diskspace quota (limit on space
* taken by all the file under the directory tree). <br> <br>
*
* The message for the exception specifies the directory where the quota
* was violated and actual quotas. Specific message is generated in the
* corresponding Exception class:
* DSQuotaExceededException or
* NSQuotaExceededException
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class QuotaExceededException extends IOException {
protected static final long serialVersionUID = 1L;
protected String pathName=null;
protected long quota; // quota
protected long count; // actual value
protected QuotaExceededException() {}
protected QuotaExceededException(String msg) {
super(msg);
}
protected QuotaExceededException(long quota, long count) {
this.quota = quota;
this.count = count;
}
public void setPathName(String path) {
this.pathName = path;
}
@Override
public String getMessage() {
return super.getMessage();
}
}
| 2,190 | 32.19697 | 75 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CorruptFileBlocks.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.util.Arrays;
/**
* Contains a list of paths corresponding to corrupt files and a cookie
* used for iterative calls to NameNode.listCorruptFileBlocks.
*
*/
public class CorruptFileBlocks {
// used for hashCode
private static final int PRIME = 16777619;
private final String[] files;
private final String cookie;
public CorruptFileBlocks() {
this(new String[0], "");
}
public CorruptFileBlocks(String[] files, String cookie) {
this.files = files;
this.cookie = cookie;
}
public String[] getFiles() {
return files;
}
public String getCookie() {
return cookie;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof CorruptFileBlocks)) {
return false;
}
CorruptFileBlocks other = (CorruptFileBlocks) obj;
return cookie.equals(other.cookie) &&
Arrays.equals(files, other.files);
}
@Override
public int hashCode() {
int result = cookie.hashCode();
for (String file : files) {
result = PRIME * result + file.hashCode();
}
return result;
}
}
| 1,978 | 25.039474 | 75 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;
import com.google.common.collect.Lists;
/**
* Associates a block with the Datanodes that contain its replicas
* and other block metadata (e.g. the file offset associated with this
* block, whether it is corrupt, whether a location is cached in memory,
* the security token, etc.).
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class LocatedBlock {
private final ExtendedBlock b;
private long offset; // offset of the first byte of the block in the file
private final DatanodeInfoWithStorage[] locs;
/** Cached storage ID for each replica */
private final String[] storageIDs;
/** Cached storage type for each replica, if reported. */
private final StorageType[] storageTypes;
// corrupt flag is true if all of the replicas of a block are corrupt.
// else false. If block has few corrupt replicas, they are filtered and
// their locations are not part of this object
private boolean corrupt;
private Token<BlockTokenIdentifier> blockToken = new Token<BlockTokenIdentifier>();
/**
* List of cached datanode locations
*/
private DatanodeInfo[] cachedLocs;
// Used when there are no locations
private static final DatanodeInfoWithStorage[] EMPTY_LOCS =
new DatanodeInfoWithStorage[0];
public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs) {
// By default, startOffset is unknown(-1) and corrupt is false.
this(b, locs, null, null, -1, false, EMPTY_LOCS);
}
public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs,
String[] storageIDs, StorageType[] storageTypes) {
this(b, locs, storageIDs, storageTypes, -1, false, EMPTY_LOCS);
}
public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, String[] storageIDs,
StorageType[] storageTypes, long startOffset,
boolean corrupt, DatanodeInfo[] cachedLocs) {
this.b = b;
this.offset = startOffset;
this.corrupt = corrupt;
if (locs==null) {
this.locs = EMPTY_LOCS;
} else {
this.locs = new DatanodeInfoWithStorage[locs.length];
for(int i = 0; i < locs.length; i++) {
DatanodeInfo di = locs[i];
DatanodeInfoWithStorage storage = new DatanodeInfoWithStorage(di,
storageIDs != null ? storageIDs[i] : null,
storageTypes != null ? storageTypes[i] : null);
this.locs[i] = storage;
}
}
this.storageIDs = storageIDs;
this.storageTypes = storageTypes;
if (cachedLocs == null || cachedLocs.length == 0) {
this.cachedLocs = EMPTY_LOCS;
} else {
this.cachedLocs = cachedLocs;
}
}
public Token<BlockTokenIdentifier> getBlockToken() {
return blockToken;
}
public void setBlockToken(Token<BlockTokenIdentifier> token) {
this.blockToken = token;
}
public ExtendedBlock getBlock() {
return b;
}
/**
* Returns the locations associated with this block. The returned array is not
* expected to be modified. If it is, caller must immediately invoke
* {@link org.apache.hadoop.hdfs.protocol.LocatedBlock#updateCachedStorageInfo}
* to update the cached Storage ID/Type arrays.
*/
public DatanodeInfo[] getLocations() {
return locs;
}
public StorageType[] getStorageTypes() {
return storageTypes;
}
public String[] getStorageIDs() {
return storageIDs;
}
/**
* Updates the cached StorageID and StorageType information. Must be
* called when the locations array is modified.
*/
public void updateCachedStorageInfo() {
if (storageIDs != null) {
for(int i = 0; i < locs.length; i++) {
storageIDs[i] = locs[i].getStorageID();
}
}
if (storageTypes != null) {
for(int i = 0; i < locs.length; i++) {
storageTypes[i] = locs[i].getStorageType();
}
}
}
public long getStartOffset() {
return offset;
}
public long getBlockSize() {
return b.getNumBytes();
}
public void setStartOffset(long value) {
this.offset = value;
}
public void setCorrupt(boolean corrupt) {
this.corrupt = corrupt;
}
public boolean isCorrupt() {
return this.corrupt;
}
/**
* Add the location of a cached replica of the block.
*
* @param loc the datanode with the cached replica
*/
public void addCachedLoc(DatanodeInfo loc) {
List<DatanodeInfo> cachedList = Lists.newArrayList(cachedLocs);
if (cachedList.contains(loc)) {
return;
}
// Try to re-use a DatanodeInfo already in loc
for (DatanodeInfoWithStorage di : locs) {
if (loc.equals(di)) {
cachedList.add(di);
cachedLocs = cachedList.toArray(cachedLocs);
return;
}
}
// Not present in loc, add it and go
cachedList.add(loc);
cachedLocs = cachedList.toArray(cachedLocs);
}
/**
* @return Datanodes with a cached block replica
*/
public DatanodeInfo[] getCachedLocations() {
return cachedLocs;
}
@Override
public String toString() {
return getClass().getSimpleName() + "{" + b
+ "; getBlockSize()=" + getBlockSize()
+ "; corrupt=" + corrupt
+ "; offset=" + offset
+ "; locs=" + Arrays.asList(locs)
+ "}";
}
}
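A minimal construction sketch for the class above, assuming ExtendedBlock's (poolId, blockId) constructor; the pool id and block id are made up, and no replica locations are supplied.

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class LocatedBlockExample {
  public static void main(String[] args) {
    // A block with no known replica locations; offset and corrupt flag keep the defaults.
    LocatedBlock lb = new LocatedBlock(
        new ExtendedBlock("BP-1234-127.0.0.1-1000", 1073741825L),
        new DatanodeInfo[0]);
    System.out.println(lb.getStartOffset());            // -1 (unknown)
    System.out.println(lb.isCorrupt());                 // false
    System.out.println(lb.getCachedLocations().length); // 0
    System.out.println(lb);
  }
}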
| 6,353 | 29.695652 | 85 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.io.IOException;
import javax.annotation.Nullable;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
/**
* CachePoolInfo describes a cache pool.
*
* This class is used in RPCs to create and modify cache pools.
* It is serializable and can be stored in the edit log.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class CachePoolInfo {
/**
* Indicates that the pool does not have a maximum relative expiry.
*/
public static final long RELATIVE_EXPIRY_NEVER =
Expiration.MAX_RELATIVE_EXPIRY_MS;
/**
* Default max relative expiry for cache pools.
*/
public static final long DEFAULT_MAX_RELATIVE_EXPIRY =
RELATIVE_EXPIRY_NEVER;
public static final long LIMIT_UNLIMITED = Long.MAX_VALUE;
public static final long DEFAULT_LIMIT = LIMIT_UNLIMITED;
final String poolName;
@Nullable
String ownerName;
@Nullable
String groupName;
@Nullable
FsPermission mode;
@Nullable
Long limit;
@Nullable
Long maxRelativeExpiryMs;
public CachePoolInfo(String poolName) {
this.poolName = poolName;
}
/**
* @return Name of the pool.
*/
public String getPoolName() {
return poolName;
}
/**
* @return The owner of the pool. Along with the group and mode, determines
* who has access to view and modify the pool.
*/
public String getOwnerName() {
return ownerName;
}
public CachePoolInfo setOwnerName(String ownerName) {
this.ownerName = ownerName;
return this;
}
/**
* @return The group of the pool. Along with the owner and mode, determines
* who has access to view and modify the pool.
*/
public String getGroupName() {
return groupName;
}
public CachePoolInfo setGroupName(String groupName) {
this.groupName = groupName;
return this;
}
/**
* @return Unix-style permissions of the pool. Along with the owner and group,
* determines who has access to view and modify the pool.
*/
public FsPermission getMode() {
return mode;
}
public CachePoolInfo setMode(FsPermission mode) {
this.mode = mode;
return this;
}
/**
* @return The maximum aggregate number of bytes that can be cached by
* directives in this pool.
*/
public Long getLimit() {
return limit;
}
public CachePoolInfo setLimit(Long bytes) {
this.limit = bytes;
return this;
}
/**
* @return The maximum relative expiration of directives of this pool in
* milliseconds
*/
public Long getMaxRelativeExpiryMs() {
return maxRelativeExpiryMs;
}
/**
* Set the maximum relative expiration of directives of this pool in
* milliseconds.
*
* @param ms in milliseconds
* @return This CachePoolInfo, for call chaining.
*/
public CachePoolInfo setMaxRelativeExpiryMs(Long ms) {
this.maxRelativeExpiryMs = ms;
return this;
}
public String toString() {
return new StringBuilder().append("{").
append("poolName:").append(poolName).
append(", ownerName:").append(ownerName).
append(", groupName:").append(groupName).
append(", mode:").append((mode == null) ? "null" :
String.format("0%03o", mode.toShort())).
append(", limit:").append(limit).
append(", maxRelativeExpiryMs:").append(maxRelativeExpiryMs).
append("}").toString();
}
@Override
public boolean equals(Object o) {
if (o == null) { return false; }
if (o == this) { return true; }
if (o.getClass() != getClass()) {
return false;
}
CachePoolInfo other = (CachePoolInfo)o;
return new EqualsBuilder().
append(poolName, other.poolName).
append(ownerName, other.ownerName).
append(groupName, other.groupName).
append(mode, other.mode).
append(limit, other.limit).
append(maxRelativeExpiryMs, other.maxRelativeExpiryMs).
isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder().
append(poolName).
append(ownerName).
append(groupName).
append(mode).
append(limit).
append(maxRelativeExpiryMs).
hashCode();
}
public static void validate(CachePoolInfo info) throws IOException {
if (info == null) {
throw new InvalidRequestException("CachePoolInfo is null");
}
if ((info.getLimit() != null) && (info.getLimit() < 0)) {
throw new InvalidRequestException("Limit is negative.");
}
if (info.getMaxRelativeExpiryMs() != null) {
long maxRelativeExpiryMs = info.getMaxRelativeExpiryMs();
if (maxRelativeExpiryMs < 0l) {
throw new InvalidRequestException("Max relative expiry is negative.");
}
if (maxRelativeExpiryMs > Expiration.MAX_RELATIVE_EXPIRY_MS) {
throw new InvalidRequestException("Max relative expiry is too big.");
}
}
validateName(info.poolName);
}
public static void validateName(String poolName) throws IOException {
if (poolName == null || poolName.isEmpty()) {
// Empty pool names are not allowed because they would be highly
// confusing. They would also break the ability to list all pools
// by starting with prevKey = ""
throw new IOException("invalid empty cache pool name");
}
}
}
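A hedged sketch of the setter chaining and validation shown above; the pool name, owner, group, mode, and limit are illustrative values.

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CachePoolInfoExample {
  public static void main(String[] args) throws Exception {
    CachePoolInfo info = new CachePoolInfo("analytics")
        .setOwnerName("alice")
        .setGroupName("analysts")
        .setMode(new FsPermission((short) 0755))
        .setLimit(64L * 1024 * 1024)                                 // 64 MB cache limit
        .setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER);
    CachePoolInfo.validate(info); // throws InvalidRequestException on bad values
    System.out.println(info);
  }
}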
| 6,486 | 27.577093 | 80 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.util.List;
import java.util.Collections;
import java.util.Comparator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileEncryptionInfo;
/**
* Collection of blocks with their locations and the file length.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class LocatedBlocks {
private final long fileLength;
private final List<LocatedBlock> blocks; // array of blocks with prioritized locations
private final boolean underConstruction;
private final LocatedBlock lastLocatedBlock;
private final boolean isLastBlockComplete;
private final FileEncryptionInfo fileEncryptionInfo;
public LocatedBlocks() {
fileLength = 0;
blocks = null;
underConstruction = false;
lastLocatedBlock = null;
isLastBlockComplete = false;
fileEncryptionInfo = null;
}
public LocatedBlocks(long flength, boolean isUnderConstruction,
List<LocatedBlock> blks, LocatedBlock lastBlock,
boolean isLastBlockCompleted, FileEncryptionInfo feInfo) {
fileLength = flength;
blocks = blks;
underConstruction = isUnderConstruction;
this.lastLocatedBlock = lastBlock;
this.isLastBlockComplete = isLastBlockCompleted;
this.fileEncryptionInfo = feInfo;
}
/**
* Get located blocks.
*/
public List<LocatedBlock> getLocatedBlocks() {
return blocks;
}
/** Get the last located block. */
public LocatedBlock getLastLocatedBlock() {
return lastLocatedBlock;
}
/** Is the last block completed? */
public boolean isLastBlockComplete() {
return isLastBlockComplete;
}
/**
* Get located block.
*/
public LocatedBlock get(int index) {
return blocks.get(index);
}
/**
* Get number of located blocks.
*/
public int locatedBlockCount() {
return blocks == null ? 0 : blocks.size();
}
/**
* Get the file length.
* @return the length of the file in bytes
*/
public long getFileLength() {
return this.fileLength;
}
/**
* Return true if file was under construction when this LocatedBlocks was
* constructed, false otherwise.
*/
public boolean isUnderConstruction() {
return underConstruction;
}
/**
* @return the FileEncryptionInfo for the LocatedBlocks
*/
public FileEncryptionInfo getFileEncryptionInfo() {
return fileEncryptionInfo;
}
/**
* Find block containing specified offset.
*
* @return block if found, or null otherwise.
*/
public int findBlock(long offset) {
// create a fake one-byte block as the search key
LocatedBlock key = new LocatedBlock(
new ExtendedBlock(), new DatanodeInfo[0]);
key.setStartOffset(offset);
key.getBlock().setNumBytes(1);
Comparator<LocatedBlock> comp =
new Comparator<LocatedBlock>() {
// Returns 0 iff a is inside b or b is inside a
@Override
public int compare(LocatedBlock a, LocatedBlock b) {
long aBeg = a.getStartOffset();
long bBeg = b.getStartOffset();
long aEnd = aBeg + a.getBlockSize();
long bEnd = bBeg + b.getBlockSize();
if(aBeg <= bBeg && bEnd <= aEnd
|| bBeg <= aBeg && aEnd <= bEnd)
return 0; // one of the blocks is inside the other
if(aBeg < bBeg)
return -1; // a's left bound is to the left of the b's
return 1;
}
};
return Collections.binarySearch(blocks, key, comp);
}
public void insertRange(int blockIdx, List<LocatedBlock> newBlocks) {
int oldIdx = blockIdx;
int insStart = 0, insEnd = 0;
for(int newIdx = 0; newIdx < newBlocks.size() && oldIdx < blocks.size();
newIdx++) {
long newOff = newBlocks.get(newIdx).getStartOffset();
long oldOff = blocks.get(oldIdx).getStartOffset();
if(newOff < oldOff) {
insEnd++;
} else if(newOff == oldOff) {
// replace old cached block by the new one
blocks.set(oldIdx, newBlocks.get(newIdx));
if(insStart < insEnd) { // insert new blocks
blocks.addAll(oldIdx, newBlocks.subList(insStart, insEnd));
oldIdx += insEnd - insStart;
}
insStart = insEnd = newIdx+1;
oldIdx++;
} else { // newOff > oldOff
assert false : "List of LocatedBlock must be sorted by startOffset";
}
}
insEnd = newBlocks.size();
if(insStart < insEnd) { // insert new blocks
blocks.addAll(oldIdx, newBlocks.subList(insStart, insEnd));
}
}
public static int getInsertIndex(int binSearchResult) {
return binSearchResult >= 0 ? binSearchResult : -(binSearchResult+1);
}
@Override
public String toString() {
final StringBuilder b = new StringBuilder(getClass().getSimpleName());
b.append("{")
.append("\n fileLength=").append(fileLength)
.append("\n underConstruction=").append(underConstruction)
.append("\n blocks=").append(blocks)
.append("\n lastLocatedBlock=").append(lastLocatedBlock)
.append("\n isLastBlockComplete=").append(isLastBlockComplete)
.append("}");
return b.toString();
}
}
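A sketch of findBlock/getInsertIndex on a two-block file, assuming ExtendedBlock's (poolId, blockId, length, generationStamp) constructor; the block ids, sizes, and pool name are made up.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class FindBlockExample {
  public static void main(String[] args) {
    // Two 128-byte blocks covering a 256-byte file, sorted by start offset.
    List<LocatedBlock> blks = new ArrayList<>();
    blks.add(new LocatedBlock(new ExtendedBlock("BP-1", 1L, 128L, 1001L),
        new DatanodeInfo[0], null, null, 0L, false, new DatanodeInfo[0]));
    blks.add(new LocatedBlock(new ExtendedBlock("BP-1", 2L, 128L, 1001L),
        new DatanodeInfo[0], null, null, 128L, false, new DatanodeInfo[0]));
    LocatedBlocks located = new LocatedBlocks(256L, false, blks, blks.get(1), true, null);
    int idx = located.findBlock(130L);                      // offset 130 falls in the second block
    System.out.println(idx);                                // 1
    System.out.println(LocatedBlocks.getInsertIndex(idx));  // 1
  }
}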
| 5,986 | 30.510526 | 88 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeLocalInfo.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Locally available datanode information
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DatanodeLocalInfo {
private final String softwareVersion;
private final String configVersion;
private final long uptime; // datanode uptime in seconds.
public DatanodeLocalInfo(String softwareVersion,
String configVersion, long uptime) {
this.softwareVersion = softwareVersion;
this.configVersion = configVersion;
this.uptime = uptime;
}
/** get software version */
public String getSoftwareVersion() {
return this.softwareVersion;
}
/** get config version */
public String getConfigVersion() {
return this.configVersion;
}
/** get uptime */
public long getUptime() {
return this.uptime;
}
/** A formatted string for printing the status of the DataNode. */
public String getDatanodeLocalReport() {
StringBuilder buffer = new StringBuilder();
buffer.append("Uptime: " + getUptime());
buffer.append(", Software version: " + getSoftwareVersion());
buffer.append(", Config version: " + getConfigVersion());
return buffer.toString();
}
}
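A tiny sketch of the report formatting above; the version strings and uptime are example values only.

import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;

public class DatanodeLocalInfoExample {
  public static void main(String[] args) {
    DatanodeLocalInfo info = new DatanodeLocalInfo("2.7.3", "core-1.0", 86400L);
    // Prints: Uptime: 86400, Software version: 2.7.3, Config version: core-1.0
    System.out.println(info.getDatanodeLocalReport());
  }
}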
| 2,111 | 31.492308 | 75 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReport.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Objects;
import org.apache.hadoop.hdfs.DFSUtilClient;
/**
* This class represents to end users the difference between two snapshots of
* the same directory, or the difference between a snapshot of the directory and
* its current state. Instead of capturing all the details of the diff, this
* class only lists where the changes happened and their types.
*/
public class SnapshotDiffReport {
private final static String LINE_SEPARATOR = System.getProperty(
"line.separator", "\n");
/**
* Types of the difference, which include CREATE, MODIFY, DELETE, and RENAME.
* Each type has a label for representation: +/M/-/R represent CREATE, MODIFY,
* DELETE, and RENAME respectively.
*/
public enum DiffType {
CREATE("+"),
MODIFY("M"),
DELETE("-"),
RENAME("R");
private final String label;
private DiffType(String label) {
this.label = label;
}
public String getLabel() {
return label;
}
public static DiffType getTypeFromLabel(String label) {
if (label.equals(CREATE.getLabel())) {
return CREATE;
} else if (label.equals(MODIFY.getLabel())) {
return MODIFY;
} else if (label.equals(DELETE.getLabel())) {
return DELETE;
} else if (label.equals(RENAME.getLabel())) {
return RENAME;
}
return null;
}
};
/**
* Representing the full path and diff type of a file/directory where changes
* have happened.
*/
public static class DiffReportEntry {
/** The type of the difference. */
private final DiffType type;
/**
* The relative path (related to the snapshot root) of 1) the file/directory
* where changes have happened, or 2) the source file/dir of a rename op.
*/
private final byte[] sourcePath;
private final byte[] targetPath;
public DiffReportEntry(DiffType type, byte[] sourcePath) {
this(type, sourcePath, null);
}
public DiffReportEntry(DiffType type, byte[][] sourcePathComponents) {
this(type, sourcePathComponents, null);
}
public DiffReportEntry(DiffType type, byte[] sourcePath, byte[] targetPath) {
this.type = type;
this.sourcePath = sourcePath;
this.targetPath = targetPath;
}
public DiffReportEntry(DiffType type, byte[][] sourcePathComponents,
byte[][] targetPathComponents) {
this.type = type;
this.sourcePath = DFSUtilClient.byteArray2bytes(sourcePathComponents);
this.targetPath = targetPathComponents == null ? null : DFSUtilClient
.byteArray2bytes(targetPathComponents);
}
@Override
public String toString() {
String str = type.getLabel() + "\t" + getPathString(sourcePath);
if (type == DiffType.RENAME) {
str += " -> " + getPathString(targetPath);
}
return str;
}
public DiffType getType() {
return type;
}
static String getPathString(byte[] path) {
String pathStr = DFSUtilClient.bytes2String(path);
if (pathStr.isEmpty()) {
return Path.CUR_DIR;
} else {
return Path.CUR_DIR + Path.SEPARATOR + pathStr;
}
}
public byte[] getSourcePath() {
return sourcePath;
}
public byte[] getTargetPath() {
return targetPath;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other != null && other instanceof DiffReportEntry) {
DiffReportEntry entry = (DiffReportEntry) other;
return type.equals(entry.getType())
&& Arrays.equals(sourcePath, entry.getSourcePath())
&& Arrays.equals(targetPath, entry.getTargetPath());
}
return false;
}
@Override
public int hashCode() {
return Objects.hashCode(getSourcePath(), getTargetPath());
}
}
/** snapshot root full path */
private final String snapshotRoot;
/** start point of the diff */
private final String fromSnapshot;
/** end point of the diff */
private final String toSnapshot;
/** list of diff */
private final List<DiffReportEntry> diffList;
public SnapshotDiffReport(String snapshotRoot, String fromSnapshot,
String toSnapshot, List<DiffReportEntry> entryList) {
this.snapshotRoot = snapshotRoot;
this.fromSnapshot = fromSnapshot;
this.toSnapshot = toSnapshot;
this.diffList = entryList != null ? entryList : Collections
.<DiffReportEntry> emptyList();
}
/** @return {@link #snapshotRoot}*/
public String getSnapshotRoot() {
return snapshotRoot;
}
/** @return {@link #fromSnapshot} */
public String getFromSnapshot() {
return fromSnapshot;
}
/** @return {@link #toSnapshot} */
public String getLaterSnapshotName() {
return toSnapshot;
}
/** @return {@link #diffList} */
public List<DiffReportEntry> getDiffList() {
return diffList;
}
@Override
public String toString() {
StringBuilder str = new StringBuilder();
String from = fromSnapshot == null || fromSnapshot.isEmpty() ?
"current directory" : "snapshot " + fromSnapshot;
String to = toSnapshot == null || toSnapshot.isEmpty() ? "current directory"
: "snapshot " + toSnapshot;
str.append("Difference between " + from + " and " + to
+ " under directory " + snapshotRoot + ":" + LINE_SEPARATOR);
for (DiffReportEntry entry : diffList) {
str.append(entry.toString() + LINE_SEPARATOR);
}
return str.toString();
}
}
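A hedged sketch that assembles a report like the one described above; the snapshot names and paths are invented, and paths are passed as UTF-8 bytes relative to the snapshot root.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;

public class SnapshotDiffExample {
  public static void main(String[] args) {
    DiffReportEntry modified = new DiffReportEntry(DiffType.MODIFY,
        "dir/file1".getBytes(StandardCharsets.UTF_8));
    DiffReportEntry renamed = new DiffReportEntry(DiffType.RENAME,
        "dir/file2".getBytes(StandardCharsets.UTF_8),
        "dir/file3".getBytes(StandardCharsets.UTF_8));
    SnapshotDiffReport report = new SnapshotDiffReport(
        "/data", "s1", "s2", Arrays.asList(modified, renamed));
    // Prints a header line followed by "M ./dir/file1" and "R ./dir/file2 -> ./dir/file3".
    System.out.print(report);
  }
}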
| 6,534 | 29.25463 | 81 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LastBlockWithStatus.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Class to contain the last block and HdfsFileStatus for the append operation
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class LastBlockWithStatus {
private final LocatedBlock lastBlock;
private final HdfsFileStatus fileStatus;
public LastBlockWithStatus(LocatedBlock lastBlock, HdfsFileStatus fileStatus) {
this.lastBlock = lastBlock;
this.fileStatus = fileStatus;
}
public LocatedBlock getLastBlock() {
return lastBlock;
}
public HdfsFileStatus getFileStatus() {
return fileStatus;
}
}
| 1,518 | 31.319149 | 81 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfoWithStorage.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DatanodeInfoWithStorage extends DatanodeInfo {
private final String storageID;
private final StorageType storageType;
public DatanodeInfoWithStorage(DatanodeInfo from, String storageID,
StorageType storageType) {
super(from);
this.storageID = storageID;
this.storageType = storageType;
setSoftwareVersion(from.getSoftwareVersion());
setDependentHostNames(from.getDependentHostNames());
setLevel(from.getLevel());
setParent(from.getParent());
}
public String getStorageID() {
return storageID;
}
public StorageType getStorageType() {
return storageType;
}
@Override
public boolean equals(Object o) {
// allows this class to be used interchangeably with DatanodeInfo
return super.equals(o);
}
@Override
public int hashCode() {
// allows this class to be used interchangeably with DatanodeInfo
return super.hashCode();
}
@Override
public String toString() {
return "DatanodeInfoWithStorage[" + super.toString() + "," + storageID +
"," + storageType + "]";
}
}
| 2,226 | 31.75 | 76 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveStats.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Describes a path-based cache directive.
*/
@InterfaceStability.Evolving
@InterfaceAudience.Public
public class CacheDirectiveStats {
public static class Builder {
private long bytesNeeded;
private long bytesCached;
private long filesNeeded;
private long filesCached;
private boolean hasExpired;
/**
* Builds a new CacheDirectiveStats populated with the set properties.
*
* @return New CacheDirectiveStats.
*/
public CacheDirectiveStats build() {
return new CacheDirectiveStats(bytesNeeded, bytesCached, filesNeeded,
filesCached, hasExpired);
}
/**
* Creates an empty builder.
*/
public Builder() {
}
/**
* Sets the bytes needed by this directive.
*
* @param bytesNeeded The bytes needed.
* @return This builder, for call chaining.
*/
public Builder setBytesNeeded(long bytesNeeded) {
this.bytesNeeded = bytesNeeded;
return this;
}
/**
* Sets the bytes cached by this directive.
*
* @param bytesCached The bytes cached.
* @return This builder, for call chaining.
*/
public Builder setBytesCached(long bytesCached) {
this.bytesCached = bytesCached;
return this;
}
/**
* Sets the files needed by this directive.
* @param filesNeeded The number of files needed
* @return This builder, for call chaining.
*/
public Builder setFilesNeeded(long filesNeeded) {
this.filesNeeded = filesNeeded;
return this;
}
/**
* Sets the files cached by this directive.
*
* @param filesCached The number of files cached.
* @return This builder, for call chaining.
*/
public Builder setFilesCached(long filesCached) {
this.filesCached = filesCached;
return this;
}
/**
* Sets whether this directive has expired.
*
* @param hasExpired if this directive has expired
* @return This builder, for call chaining.
*/
public Builder setHasExpired(boolean hasExpired) {
this.hasExpired = hasExpired;
return this;
}
}
private final long bytesNeeded;
private final long bytesCached;
private final long filesNeeded;
private final long filesCached;
private final boolean hasExpired;
private CacheDirectiveStats(long bytesNeeded, long bytesCached,
long filesNeeded, long filesCached, boolean hasExpired) {
this.bytesNeeded = bytesNeeded;
this.bytesCached = bytesCached;
this.filesNeeded = filesNeeded;
this.filesCached = filesCached;
this.hasExpired = hasExpired;
}
/**
* @return The bytes needed.
*/
public long getBytesNeeded() {
return bytesNeeded;
}
/**
* @return The bytes cached.
*/
public long getBytesCached() {
return bytesCached;
}
/**
* @return The number of files needed.
*/
public long getFilesNeeded() {
return filesNeeded;
}
/**
* @return The number of files cached.
*/
public long getFilesCached() {
return filesCached;
}
/**
* @return Whether this directive has expired.
*/
public boolean hasExpired() {
return hasExpired;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("{");
builder.append("bytesNeeded: ").append(bytesNeeded);
builder.append(", ").append("bytesCached: ").append(bytesCached);
builder.append(", ").append("filesNeeded: ").append(filesNeeded);
builder.append(", ").append("filesCached: ").append(filesCached);
builder.append(", ").append("hasExpired: ").append(hasExpired);
builder.append("}");
return builder.toString();
}
};
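// Illustrative usage sketch (an assumed example, not taken from the upstream
// source): assembling a CacheDirectiveStats with the Builder above. The
// figures are arbitrary.
class CacheDirectiveStatsExample {
public static void main(String[] args) {
CacheDirectiveStats stats = new CacheDirectiveStats.Builder()
.setBytesNeeded(4L * 1024 * 1024)
.setBytesCached(2L * 1024 * 1024)
.setFilesNeeded(10)
.setFilesCached(5)
.setHasExpired(false)
.build();
// toString() renders the same fields in a compact "{...}" form.
System.out.println(stats);
}
}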
| 4,668 | 26.464706 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;
@InterfaceAudience.Private
public class HdfsConstants {
// Long that indicates "leave current quota unchanged"
public static final long QUOTA_DONT_SET = Long.MAX_VALUE;
public static final long QUOTA_RESET = -1L;
public static final int BYTES_IN_INTEGER = Integer.SIZE / Byte.SIZE;
/**
* URI Scheme for hdfs://namenode/ URIs.
*/
public static final String HDFS_URI_SCHEME = "hdfs";
public static final String MEMORY_STORAGE_POLICY_NAME = "LAZY_PERSIST";
public static final String ALLSSD_STORAGE_POLICY_NAME = "ALL_SSD";
public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
public static final String HOT_STORAGE_POLICY_NAME = "HOT";
public static final String WARM_STORAGE_POLICY_NAME = "WARM";
public static final String COLD_STORAGE_POLICY_NAME = "COLD";
// TODO should be conf injected?
public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
/**
* A special path component contained in the path for a snapshot file/dir
*/
public static final String DOT_SNAPSHOT_DIR = ".snapshot";
public static final String SEPARATOR_DOT_SNAPSHOT_DIR
= Path.SEPARATOR + DOT_SNAPSHOT_DIR;
public static final String SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR
= Path.SEPARATOR + DOT_SNAPSHOT_DIR + Path.SEPARATOR;
/**
* Generation stamp of blocks that pre-date the introduction
* of a generation stamp.
*/
public static final long GRANDFATHER_GENERATION_STAMP = 0;
/**
* The inode id validation of lease check will be skipped when the request
* uses GRANDFATHER_INODE_ID for backward compatibility.
*/
public static final long GRANDFATHER_INODE_ID = 0;
public static final byte BLOCK_STORAGE_POLICY_ID_UNSPECIFIED = 0;
/**
* A prefix put before the namenode URI inside the "service" field
   * of a delegation token, indicating that the URI is a logical (HA)
* URI.
*/
public static final String HA_DT_SERVICE_PREFIX = "ha-";
// The name of the SafeModeException. FileSystem should retry if it sees
// the below exception in RPC
public static final String SAFEMODE_EXCEPTION_CLASS_NAME =
"org.apache.hadoop.hdfs.server.namenode.SafeModeException";
/**
* HDFS Protocol Names:
*/
public static final String CLIENT_NAMENODE_PROTOCOL_NAME =
"org.apache.hadoop.hdfs.protocol.ClientProtocol";
// SafeMode actions
public enum SafeModeAction {
SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET
}
public enum RollingUpgradeAction {
QUERY, PREPARE, FINALIZE;
private static final Map<String, RollingUpgradeAction> MAP
= new HashMap<>();
static {
MAP.put("", QUERY);
for(RollingUpgradeAction a : values()) {
MAP.put(a.name(), a);
}
}
    /** Convert the given String to a RollingUpgradeAction. */
public static RollingUpgradeAction fromString(String s) {
return MAP.get(StringUtils.toUpperCase(s));
}
}
// type of the datanode report
public enum DatanodeReportType {
ALL, LIVE, DEAD, DECOMMISSIONING
}
/* Hidden constructor */
protected HdfsConstants() {
}
}
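// Illustrative usage sketch (an assumed example, not taken from the upstream
// source): RollingUpgradeAction.fromString() maps command-line strings onto
// the enum; the empty string maps to QUERY and unknown strings to null.
class HdfsConstantsExample {
public static void main(String[] args) {
System.out.println(HdfsConstants.RollingUpgradeAction.fromString(""));        // QUERY
System.out.println(HdfsConstants.RollingUpgradeAction.fromString("prepare")); // PREPARE
System.out.println(HdfsConstants.RollingUpgradeAction.fromString("bogus"));   // null
// The URI scheme constant is what hdfs:// paths are validated against.
System.out.println(HdfsConstants.HDFS_URI_SCHEME + "://nn.example.com:8020/");
}
}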
| 4,112 | 35.39823 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.io.PrintStream;
import java.text.SimpleDateFormat;
import java.util.Comparator;
import java.util.Date;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtilClient;
/**
* Metadata about a snapshottable directory
*/
public class SnapshottableDirectoryStatus {
/** Compare the statuses by full paths. */
public static final Comparator<SnapshottableDirectoryStatus> COMPARATOR
= new Comparator<SnapshottableDirectoryStatus>() {
@Override
public int compare(SnapshottableDirectoryStatus left,
SnapshottableDirectoryStatus right) {
int d = DFSUtilClient.compareBytes(left.parentFullPath, right.parentFullPath);
return d != 0? d
: DFSUtilClient.compareBytes(left.dirStatus.getLocalNameInBytes(),
right.dirStatus.getLocalNameInBytes());
}
};
/** Basic information of the snapshottable directory */
private final HdfsFileStatus dirStatus;
/** Number of snapshots that have been taken*/
private final int snapshotNumber;
/** Number of snapshots allowed. */
private final int snapshotQuota;
/** Full path of the parent. */
private final byte[] parentFullPath;
public SnapshottableDirectoryStatus(long modification_time, long access_time,
FsPermission permission, String owner, String group, byte[] localName,
long inodeId, int childrenNum,
int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
access_time, permission, owner, group, null, localName, inodeId,
childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
this.snapshotNumber = snapshotNumber;
this.snapshotQuota = snapshotQuota;
this.parentFullPath = parentFullPath;
}
/**
* @return Number of snapshots that have been taken for the directory
*/
public int getSnapshotNumber() {
return snapshotNumber;
}
/**
* @return Number of snapshots allowed for the directory
*/
public int getSnapshotQuota() {
return snapshotQuota;
}
/**
* @return Full path of the parent
*/
public byte[] getParentFullPath() {
return parentFullPath;
}
/**
* @return The basic information of the directory
*/
public HdfsFileStatus getDirStatus() {
return dirStatus;
}
/**
   * @return Full path of the snapshottable directory
*/
public Path getFullPath() {
String parentFullPathStr =
(parentFullPath == null || parentFullPath.length == 0) ?
null : DFSUtilClient.bytes2String(parentFullPath);
if (parentFullPathStr == null
&& dirStatus.getLocalNameInBytes().length == 0) {
// root
return new Path("/");
} else {
return parentFullPathStr == null ? new Path(dirStatus.getLocalName())
: new Path(parentFullPathStr, dirStatus.getLocalName());
}
}
/**
* Print a list of {@link SnapshottableDirectoryStatus} out to a given stream.
* @param stats The list of {@link SnapshottableDirectoryStatus}
* @param out The given stream for printing.
*/
public static void print(SnapshottableDirectoryStatus[] stats,
PrintStream out) {
if (stats == null || stats.length == 0) {
out.println();
return;
}
int maxRepl = 0, maxLen = 0, maxOwner = 0, maxGroup = 0;
int maxSnapshotNum = 0, maxSnapshotQuota = 0;
for (SnapshottableDirectoryStatus status : stats) {
maxRepl = maxLength(maxRepl, status.dirStatus.getReplication());
maxLen = maxLength(maxLen, status.dirStatus.getLen());
maxOwner = maxLength(maxOwner, status.dirStatus.getOwner());
maxGroup = maxLength(maxGroup, status.dirStatus.getGroup());
maxSnapshotNum = maxLength(maxSnapshotNum, status.snapshotNumber);
maxSnapshotQuota = maxLength(maxSnapshotQuota, status.snapshotQuota);
}
StringBuilder fmt = new StringBuilder();
fmt.append("%s%s "); // permission string
fmt.append("%" + maxRepl + "s ");
fmt.append((maxOwner > 0) ? "%-" + maxOwner + "s " : "%s");
fmt.append((maxGroup > 0) ? "%-" + maxGroup + "s " : "%s");
fmt.append("%" + maxLen + "s ");
fmt.append("%s "); // mod time
fmt.append("%" + maxSnapshotNum + "s ");
fmt.append("%" + maxSnapshotQuota + "s ");
fmt.append("%s"); // path
String lineFormat = fmt.toString();
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm");
for (SnapshottableDirectoryStatus status : stats) {
String line = String.format(lineFormat, "d",
status.dirStatus.getPermission(),
status.dirStatus.getReplication(),
status.dirStatus.getOwner(),
status.dirStatus.getGroup(),
String.valueOf(status.dirStatus.getLen()),
dateFormat.format(new Date(status.dirStatus.getModificationTime())),
status.snapshotNumber, status.snapshotQuota,
status.getFullPath().toString()
);
out.println(line);
}
}
private static int maxLength(int n, Object value) {
return Math.max(n, String.valueOf(value).length());
}
public static class Bean {
private final String path;
private final int snapshotNumber;
private final int snapshotQuota;
private final long modificationTime;
private final short permission;
private final String owner;
private final String group;
public Bean(String path, int snapshotNumber, int snapshotQuota,
long modificationTime, short permission, String owner, String group) {
this.path = path;
this.snapshotNumber = snapshotNumber;
this.snapshotQuota = snapshotQuota;
this.modificationTime = modificationTime;
this.permission = permission;
this.owner = owner;
this.group = group;
}
public String getPath() {
return path;
}
public int getSnapshotNumber() {
return snapshotNumber;
}
public int getSnapshotQuota() {
return snapshotQuota;
}
public long getModificationTime() {
return modificationTime;
}
public short getPermission() {
return permission;
}
public String getOwner() {
return owner;
}
public String getGroup() {
return group;
}
}
}
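// Illustrative usage sketch (an assumed example, not taken from the upstream
// source): building one status entry by hand and printing it with the tabular
// formatter above. Every value is made up; real entries come from the NameNode.
class SnapshottableDirectoryStatusExample {
public static void main(String[] args) {
byte[] localName = "dir1".getBytes(java.nio.charset.StandardCharsets.UTF_8);
byte[] parentPath = "/user/hdfs".getBytes(java.nio.charset.StandardCharsets.UTF_8);
SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
System.currentTimeMillis(),      // modification time
System.currentTimeMillis(),      // access time
new FsPermission((short) 0755), "hdfs", "supergroup",
localName,
16385L,                          // inode id
0,                               // children
2,                               // snapshots taken
65536,                           // snapshot quota
parentPath);
System.out.println(status.getFullPath());  // /user/hdfs/dir1
SnapshottableDirectoryStatus.print(
new SnapshottableDirectoryStatus[] { status }, System.out);
}
}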
| 7,176 | 31.771689 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * The exception that is thrown when a client asks to create a file that is
 * already being created and has not yet been closed.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class AlreadyBeingCreatedException extends IOException {
static final long serialVersionUID = 0x12308AD009L;
public AlreadyBeingCreatedException(String msg) {
super(msg);
}
}
| 1,372 | 35.131579 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public final class NSQuotaExceededException extends QuotaExceededException {
protected static final long serialVersionUID = 1L;
private String prefix;
public NSQuotaExceededException() {}
public NSQuotaExceededException(String msg) {
super(msg);
}
public NSQuotaExceededException(long quota, long count) {
super(quota, count);
}
@Override
public String getMessage() {
String msg = super.getMessage();
if (msg == null) {
msg = "The NameSpace quota (directories and files)" +
(pathName==null?"":(" of directory " + pathName)) +
" is exceeded: quota=" + quota + " file count=" + count;
if (prefix != null) {
msg = prefix + ": " + msg;
}
}
return msg;
}
/** Set a prefix for the error message. */
public void setMessagePrefix(final String prefix) {
this.prefix = prefix;
}
}
| 1,900 | 30.163934 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Identifies a Block uniquely across the block pools
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class ExtendedBlock {
private String poolId;
private Block block;
public ExtendedBlock() {
this(null, 0, 0, 0);
}
public ExtendedBlock(final ExtendedBlock b) {
this(b.poolId, new Block(b.block));
}
public ExtendedBlock(final String poolId, final long blockId) {
this(poolId, blockId, 0, 0);
}
public ExtendedBlock(String poolId, Block b) {
this.poolId = poolId;
this.block = b;
}
public ExtendedBlock(final String poolId, final long blkid, final long len,
final long genstamp) {
this.poolId = poolId;
block = new Block(blkid, len, genstamp);
}
public String getBlockPoolId() {
return poolId;
}
/** Returns the block file name for the block */
public String getBlockName() {
return block.getBlockName();
}
public long getNumBytes() {
return block.getNumBytes();
}
public long getBlockId() {
return block.getBlockId();
}
public long getGenerationStamp() {
return block.getGenerationStamp();
}
public void setBlockId(final long bid) {
block.setBlockId(bid);
}
public void setGenerationStamp(final long genStamp) {
block.setGenerationStamp(genStamp);
}
public void setNumBytes(final long len) {
block.setNumBytes(len);
}
public void set(String poolId, Block blk) {
this.poolId = poolId;
this.block = blk;
}
public static Block getLocalBlock(final ExtendedBlock b) {
return b == null ? null : b.getLocalBlock();
}
public Block getLocalBlock() {
return block;
}
@Override // Object
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof ExtendedBlock)) {
return false;
}
ExtendedBlock b = (ExtendedBlock)o;
return b.block.equals(block) && b.poolId.equals(poolId);
}
@Override // Object
public int hashCode() {
int result = 31 + poolId.hashCode();
return (31 * result + block.hashCode());
}
@Override // Object
public String toString() {
return poolId + ":" + block;
}
}
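// Illustrative usage sketch (an assumed example, not taken from the upstream
// source): an ExtendedBlock is a Block qualified by a block-pool id, so the
// same block id in two different pools compares unequal. Values are made up.
class ExtendedBlockExample {
public static void main(String[] args) {
ExtendedBlock a = new ExtendedBlock("BP-1-10.0.0.1-1", 1073741825L, 134217728L, 1001L);
ExtendedBlock b = new ExtendedBlock("BP-2-10.0.0.2-1", 1073741825L, 134217728L, 1001L);
System.out.println(a);            // BP-1-10.0.0.1-1:blk_1073741825_1001
System.out.println(a.equals(b));  // false: same block id, different pool
System.out.println(a.getLocalBlock().equals(b.getLocalBlock())); // true
}
}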
| 3,125 | 24.209677 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.io.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.*;
/**************************************************
* A Block is a Hadoop FS primitive, identified by a
* long.
*
**************************************************/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class Block implements Writable, Comparable<Block> {
public static final String BLOCK_FILE_PREFIX = "blk_";
public static final String METADATA_EXTENSION = ".meta";
static { // register a ctor
WritableFactories.setFactory
(Block.class,
new WritableFactory() {
@Override
public Writable newInstance() { return new Block(); }
});
}
public static final Pattern blockFilePattern = Pattern
.compile(BLOCK_FILE_PREFIX + "(-??\\d++)$");
public static final Pattern metaFilePattern = Pattern
.compile(BLOCK_FILE_PREFIX + "(-??\\d++)_(\\d++)\\" + METADATA_EXTENSION
+ "$");
public static final Pattern metaOrBlockFilePattern = Pattern
.compile(BLOCK_FILE_PREFIX + "(-??\\d++)(_(\\d++)\\" + METADATA_EXTENSION
+ ")?$");
public static boolean isBlockFilename(File f) {
String name = f.getName();
return blockFilePattern.matcher(name).matches();
}
public static long filename2id(String name) {
Matcher m = blockFilePattern.matcher(name);
return m.matches() ? Long.parseLong(m.group(1)) : 0;
}
public static boolean isMetaFilename(String name) {
return metaFilePattern.matcher(name).matches();
}
public static File metaToBlockFile(File metaFile) {
return new File(metaFile.getParent(), metaFile.getName().substring(
0, metaFile.getName().lastIndexOf('_')));
}
/**
   * Get the generation stamp from the name of the meta file.
*/
public static long getGenerationStamp(String metaFile) {
Matcher m = metaFilePattern.matcher(metaFile);
return m.matches() ? Long.parseLong(m.group(2))
: HdfsConstants.GRANDFATHER_GENERATION_STAMP;
}
/**
* Get the blockId from the name of the meta or block file
*/
public static long getBlockId(String metaOrBlockFile) {
Matcher m = metaOrBlockFilePattern.matcher(metaOrBlockFile);
return m.matches() ? Long.parseLong(m.group(1)) : 0;
}
private long blockId;
private long numBytes;
private long generationStamp;
public Block() {this(0, 0, 0);}
public Block(final long blkid, final long len, final long generationStamp) {
set(blkid, len, generationStamp);
}
public Block(final long blkid) {
this(blkid, 0, HdfsConstants.GRANDFATHER_GENERATION_STAMP);
}
public Block(Block blk) {
this(blk.blockId, blk.numBytes, blk.generationStamp);
}
/**
* Find the blockid from the given filename
*/
public Block(File f, long len, long genstamp) {
this(filename2id(f.getName()), len, genstamp);
}
public void set(long blkid, long len, long genStamp) {
this.blockId = blkid;
this.numBytes = len;
this.generationStamp = genStamp;
}
  /** Get the block id. */
public long getBlockId() {
return blockId;
}
public void setBlockId(long bid) {
blockId = bid;
}
  /** Get the block file name, i.e. BLOCK_FILE_PREFIX followed by the block id. */
public String getBlockName() {
return BLOCK_FILE_PREFIX + String.valueOf(blockId);
}
  /** Get the number of bytes in the block. */
public long getNumBytes() {
return numBytes;
}
public void setNumBytes(long len) {
this.numBytes = len;
}
public long getGenerationStamp() {
return generationStamp;
}
public void setGenerationStamp(long stamp) {
generationStamp = stamp;
}
  /** Render the block as "blk_<blockId>_<generationStamp>". */
@Override
public String toString() {
return getBlockName() + "_" + getGenerationStamp();
}
public void appendStringTo(StringBuilder sb) {
sb.append(BLOCK_FILE_PREFIX)
.append(blockId)
.append("_")
.append(getGenerationStamp());
}
/////////////////////////////////////
// Writable
/////////////////////////////////////
@Override // Writable
public void write(DataOutput out) throws IOException {
writeHelper(out);
}
@Override // Writable
public void readFields(DataInput in) throws IOException {
readHelper(in);
}
final void writeHelper(DataOutput out) throws IOException {
out.writeLong(blockId);
out.writeLong(numBytes);
out.writeLong(generationStamp);
}
final void readHelper(DataInput in) throws IOException {
this.blockId = in.readLong();
this.numBytes = in.readLong();
this.generationStamp = in.readLong();
if (numBytes < 0) {
throw new IOException("Unexpected block size: " + numBytes);
}
}
// write only the identifier part of the block
public void writeId(DataOutput out) throws IOException {
out.writeLong(blockId);
out.writeLong(generationStamp);
}
// Read only the identifier part of the block
public void readId(DataInput in) throws IOException {
this.blockId = in.readLong();
this.generationStamp = in.readLong();
}
@Override // Comparable
public int compareTo(Block b) {
return blockId < b.blockId ? -1 :
blockId > b.blockId ? 1 : 0;
}
@Override // Object
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Block)) {
return false;
}
return compareTo((Block)o) == 0;
}
/**
* @return true if the two blocks have the same block ID and the same
* generation stamp, or if both blocks are null.
*/
public static boolean matchingIdAndGenStamp(Block a, Block b) {
if (a == b) return true; // same block, or both null
if (a == null || b == null) return false; // only one null
return a.blockId == b.blockId &&
a.generationStamp == b.generationStamp;
}
@Override // Object
public int hashCode() {
//GenerationStamp is IRRELEVANT and should not be used here
return (int)(blockId^(blockId>>>32));
}
}
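// Illustrative usage sketch (an assumed example, not taken from the upstream
// source): parsing on-disk block and meta file names (blk_<id> and
// blk_<id>_<genstamp>.meta) with the static helpers above.
class BlockFilenameExample {
public static void main(String[] args) {
String metaFile = "blk_1073741825_1001.meta";
System.out.println(Block.filename2id("blk_1073741825")); // 1073741825
System.out.println(Block.isMetaFilename(metaFile));      // true
System.out.println(Block.getGenerationStamp(metaFile));  // 1001
System.out.println(Block.getBlockId(metaFile));          // 1073741825
// equals()/hashCode() use only the block id; the generation stamp is
// compared separately via matchingIdAndGenStamp().
Block b1 = new Block(1073741825L, 0, 1001L);
Block b2 = new Block(1073741825L, 0, 1002L);
System.out.println(b1.equals(b2));                       // true
System.out.println(Block.matchingIdAndGenStamp(b1, b2)); // false
}
}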
| 6,874 | 27.17623 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolEntry.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Describes a Cache Pool entry.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class CachePoolEntry {
private final CachePoolInfo info;
private final CachePoolStats stats;
public CachePoolEntry(CachePoolInfo info, CachePoolStats stats) {
this.info = info;
this.stats = stats;
}
public CachePoolInfo getInfo() {
return info;
}
public CachePoolStats getStats() {
return stats;
}
}
| 1,408 | 29.630435 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
/**
 * A simple class for representing an encryption zone. Presently an encryption
 * zone has a path (the root of the encryption zone), a key name, a cipher
 * suite, a crypto protocol version, and a unique id. The id is used to
 * implement batched listing of encryption zones.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class EncryptionZone {
private final long id;
private final String path;
private final CipherSuite suite;
private final CryptoProtocolVersion version;
private final String keyName;
public EncryptionZone(long id, String path, CipherSuite suite,
CryptoProtocolVersion version, String keyName) {
this.id = id;
this.path = path;
this.suite = suite;
this.version = version;
this.keyName = keyName;
}
public long getId() {
return id;
}
public String getPath() {
return path;
}
public CipherSuite getSuite() {
return suite;
}
public CryptoProtocolVersion getVersion() { return version; }
public String getKeyName() {
return keyName;
}
@Override
public int hashCode() {
return new HashCodeBuilder(13, 31)
.append(id)
.append(path)
.append(suite)
.append(version)
.append(keyName).
toHashCode();
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (obj == this) {
return true;
}
if (obj.getClass() != getClass()) {
return false;
}
EncryptionZone rhs = (EncryptionZone) obj;
return new EqualsBuilder().
append(id, rhs.id).
append(path, rhs.path).
append(suite, rhs.suite).
append(version, rhs.version).
append(keyName, rhs.keyName).
isEquals();
}
@Override
public String toString() {
return "EncryptionZone [id=" + id +
", path=" + path +
", suite=" + suite +
", version=" + version +
", keyName=" + keyName + "]";
}
}
| 3,102 | 26.954955 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import java.util.Date;
import java.util.LinkedList;
import java.util.List;
import static org.apache.hadoop.hdfs.DFSUtilClient.percent2String;
/**
* This class extends the primary identifier of a Datanode with ephemeral
 * state, e.g. usage information, current administrative state, and the
* network location that is communicated to clients.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DatanodeInfo extends DatanodeID implements Node {
private long capacity;
private long dfsUsed;
private long remaining;
private long blockPoolUsed;
private long cacheCapacity;
private long cacheUsed;
private long lastUpdate;
private long lastUpdateMonotonic;
private int xceiverCount;
private String location = NetworkTopology.DEFAULT_RACK;
private String softwareVersion;
private List<String> dependentHostNames = new LinkedList<String>();
// Datanode administrative states
public enum AdminStates {
NORMAL("In Service"),
DECOMMISSION_INPROGRESS("Decommission In Progress"),
DECOMMISSIONED("Decommissioned");
final String value;
AdminStates(final String v) {
this.value = v;
}
@Override
public String toString() {
return value;
}
public static AdminStates fromValue(final String value) {
for (AdminStates as : AdminStates.values()) {
if (as.value.equals(value)) return as;
}
return NORMAL;
}
}
protected AdminStates adminState;
public DatanodeInfo(DatanodeInfo from) {
super(from);
this.capacity = from.getCapacity();
this.dfsUsed = from.getDfsUsed();
this.remaining = from.getRemaining();
this.blockPoolUsed = from.getBlockPoolUsed();
this.cacheCapacity = from.getCacheCapacity();
this.cacheUsed = from.getCacheUsed();
this.lastUpdate = from.getLastUpdate();
this.lastUpdateMonotonic = from.getLastUpdateMonotonic();
this.xceiverCount = from.getXceiverCount();
this.location = from.getNetworkLocation();
this.adminState = from.getAdminState();
}
public DatanodeInfo(DatanodeID nodeID) {
super(nodeID);
this.capacity = 0L;
this.dfsUsed = 0L;
this.remaining = 0L;
this.blockPoolUsed = 0L;
this.cacheCapacity = 0L;
this.cacheUsed = 0L;
this.lastUpdate = 0L;
this.lastUpdateMonotonic = 0L;
this.xceiverCount = 0;
this.adminState = null;
}
public DatanodeInfo(DatanodeID nodeID, String location) {
this(nodeID);
this.location = location;
}
public DatanodeInfo(DatanodeID nodeID, String location,
final long capacity, final long dfsUsed, final long remaining,
final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
final long lastUpdate, final long lastUpdateMonotonic,
final int xceiverCount, final AdminStates adminState) {
this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getDatanodeUuid(),
nodeID.getXferPort(), nodeID.getInfoPort(), nodeID.getInfoSecurePort(),
nodeID.getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed,
cacheCapacity, cacheUsed, lastUpdate, lastUpdateMonotonic,
xceiverCount, location, adminState);
}
/** Constructor */
public DatanodeInfo(final String ipAddr, final String hostName,
final String datanodeUuid, final int xferPort, final int infoPort,
final int infoSecurePort, final int ipcPort,
final long capacity, final long dfsUsed, final long remaining,
final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
final long lastUpdate, final long lastUpdateMonotonic,
final int xceiverCount, final String networkLocation,
final AdminStates adminState) {
super(ipAddr, hostName, datanodeUuid, xferPort, infoPort,
infoSecurePort, ipcPort);
this.capacity = capacity;
this.dfsUsed = dfsUsed;
this.remaining = remaining;
this.blockPoolUsed = blockPoolUsed;
this.cacheCapacity = cacheCapacity;
this.cacheUsed = cacheUsed;
this.lastUpdate = lastUpdate;
this.lastUpdateMonotonic = lastUpdateMonotonic;
this.xceiverCount = xceiverCount;
this.location = networkLocation;
this.adminState = adminState;
}
  /** The name of the datanode, i.e. its transfer address. */
@Override
public String getName() {
return getXferAddr();
}
/** The raw capacity. */
public long getCapacity() { return capacity; }
/** The used space by the data node. */
public long getDfsUsed() { return dfsUsed; }
/** The used space by the block pool on data node. */
public long getBlockPoolUsed() { return blockPoolUsed; }
  /** The non-DFS used space by the data node. */
public long getNonDfsUsed() {
long nonDFSUsed = capacity - dfsUsed - remaining;
return nonDFSUsed < 0 ? 0 : nonDFSUsed;
}
/** The used space by the data node as percentage of present capacity */
public float getDfsUsedPercent() {
return DFSUtilClient.getPercentUsed(dfsUsed, capacity);
}
/** The raw free space. */
public long getRemaining() { return remaining; }
/** Used space by the block pool as percentage of present capacity */
public float getBlockPoolUsedPercent() {
return DFSUtilClient.getPercentUsed(blockPoolUsed, capacity);
}
/** The remaining space as percentage of configured capacity. */
public float getRemainingPercent() {
return DFSUtilClient.getPercentRemaining(remaining, capacity);
}
/**
* @return Amount of cache capacity in bytes
*/
public long getCacheCapacity() {
return cacheCapacity;
}
/**
* @return Amount of cache used in bytes
*/
public long getCacheUsed() {
return cacheUsed;
}
/**
* @return Cache used as a percentage of the datanode's total cache capacity
*/
public float getCacheUsedPercent() {
return DFSUtilClient.getPercentUsed(cacheUsed, cacheCapacity);
}
/**
* @return Amount of cache remaining in bytes
*/
public long getCacheRemaining() {
return cacheCapacity - cacheUsed;
}
/**
* @return Cache remaining as a percentage of the datanode's total cache
* capacity
*/
public float getCacheRemainingPercent() {
return DFSUtilClient.getPercentRemaining(getCacheRemaining(), cacheCapacity);
}
/**
* Get the last update timestamp.
* Return value is suitable for Date conversion.
*/
public long getLastUpdate() { return lastUpdate; }
/**
   * The time when this information was accurate, as a monotonic timestamp. <br>
   * The return value is suitable for computing time differences and should
   * not be converted to a Date.
*/
public long getLastUpdateMonotonic() { return lastUpdateMonotonic;}
/**
* Set lastUpdate monotonic time
*/
public void setLastUpdateMonotonic(long lastUpdateMonotonic) {
this.lastUpdateMonotonic = lastUpdateMonotonic;
}
/** number of active connections */
public int getXceiverCount() { return xceiverCount; }
/** Sets raw capacity. */
public void setCapacity(long capacity) {
this.capacity = capacity;
}
/** Sets the used space for the datanode. */
public void setDfsUsed(long dfsUsed) {
this.dfsUsed = dfsUsed;
}
/** Sets raw free space. */
public void setRemaining(long remaining) {
this.remaining = remaining;
}
/** Sets block pool used space */
public void setBlockPoolUsed(long bpUsed) {
this.blockPoolUsed = bpUsed;
}
/** Sets cache capacity. */
public void setCacheCapacity(long cacheCapacity) {
this.cacheCapacity = cacheCapacity;
}
/** Sets cache used. */
public void setCacheUsed(long cacheUsed) {
this.cacheUsed = cacheUsed;
}
/** Sets time when this information was accurate. */
public void setLastUpdate(long lastUpdate) {
this.lastUpdate = lastUpdate;
}
/** Sets number of active connections */
public void setXceiverCount(int xceiverCount) {
this.xceiverCount = xceiverCount;
}
/** network location */
public synchronized String getNetworkLocation() {return location;}
/** Sets the network location */
public synchronized void setNetworkLocation(String location) {
this.location = NodeBase.normalize(location);
}
/** Add a hostname to a list of network dependencies */
public void addDependentHostName(String hostname) {
dependentHostNames.add(hostname);
}
/** List of Network dependencies */
public List<String> getDependentHostNames() {
return dependentHostNames;
}
/** Sets the network dependencies */
public void setDependentHostNames(List<String> dependencyList) {
dependentHostNames = dependencyList;
}
/** A formatted string for reporting the status of the DataNode. */
public String getDatanodeReport() {
StringBuilder buffer = new StringBuilder();
long c = getCapacity();
long r = getRemaining();
long u = getDfsUsed();
long nonDFSUsed = getNonDfsUsed();
float usedPercent = getDfsUsedPercent();
float remainingPercent = getRemainingPercent();
long cc = getCacheCapacity();
long cr = getCacheRemaining();
long cu = getCacheUsed();
float cacheUsedPercent = getCacheUsedPercent();
float cacheRemainingPercent = getCacheRemainingPercent();
String lookupName = NetUtils.getHostNameOfIP(getName());
buffer.append("Name: "+ getName());
if (lookupName != null) {
buffer.append(" (" + lookupName + ")");
}
buffer.append("\n");
buffer.append("Hostname: " + getHostName() + "\n");
if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
buffer.append("Rack: "+location+"\n");
}
buffer.append("Decommission Status : ");
if (isDecommissioned()) {
buffer.append("Decommissioned\n");
} else if (isDecommissionInProgress()) {
buffer.append("Decommission in progress\n");
} else {
buffer.append("Normal\n");
}
buffer.append("Configured Capacity: "+c+" ("+StringUtils.byteDesc(c)+")"+"\n");
buffer.append("DFS Used: "+u+" ("+StringUtils.byteDesc(u)+")"+"\n");
buffer.append("Non DFS Used: "+nonDFSUsed+" ("+StringUtils.byteDesc(nonDFSUsed)+")"+"\n");
buffer.append("DFS Remaining: " +r+ " ("+StringUtils.byteDesc(r)+")"+"\n");
buffer.append("DFS Used%: "+percent2String(usedPercent) + "\n");
buffer.append("DFS Remaining%: "+percent2String(remainingPercent) + "\n");
buffer.append("Configured Cache Capacity: "+cc+" ("+StringUtils.byteDesc(cc)+")"+"\n");
buffer.append("Cache Used: "+cu+" ("+StringUtils.byteDesc(cu)+")"+"\n");
buffer.append("Cache Remaining: " +cr+ " ("+StringUtils.byteDesc(cr)+")"+"\n");
buffer.append("Cache Used%: "+percent2String(cacheUsedPercent) + "\n");
buffer.append("Cache Remaining%: "+percent2String(cacheRemainingPercent) + "\n");
buffer.append("Xceivers: "+getXceiverCount()+"\n");
buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
return buffer.toString();
}
/** A formatted string for printing the status of the DataNode. */
public String dumpDatanode() {
StringBuilder buffer = new StringBuilder();
long c = getCapacity();
long r = getRemaining();
long u = getDfsUsed();
long cc = getCacheCapacity();
long cr = getCacheRemaining();
long cu = getCacheUsed();
buffer.append(getName());
if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
buffer.append(" "+location);
}
if (isDecommissioned()) {
buffer.append(" DD");
} else if (isDecommissionInProgress()) {
buffer.append(" DP");
} else {
buffer.append(" IN");
}
buffer.append(" " + c + "(" + StringUtils.byteDesc(c)+")");
buffer.append(" " + u + "(" + StringUtils.byteDesc(u)+")");
buffer.append(" " + percent2String(u/(double)c));
buffer.append(" " + r + "(" + StringUtils.byteDesc(r)+")");
buffer.append(" " + cc + "(" + StringUtils.byteDesc(cc)+")");
buffer.append(" " + cu + "(" + StringUtils.byteDesc(cu)+")");
buffer.append(" " + percent2String(cu/(double)cc));
buffer.append(" " + cr + "(" + StringUtils.byteDesc(cr)+")");
buffer.append(" " + new Date(lastUpdate));
return buffer.toString();
}
/**
   * Start decommissioning a node by marking it DECOMMISSION_INPROGRESS.
*/
public void startDecommission() {
adminState = AdminStates.DECOMMISSION_INPROGRESS;
}
/**
   * Stop decommissioning a node and return it to the NORMAL admin state.
*/
public void stopDecommission() {
adminState = null;
}
/**
* Returns true if the node is in the process of being decommissioned
*/
public boolean isDecommissionInProgress() {
return adminState == AdminStates.DECOMMISSION_INPROGRESS;
}
/**
* Returns true if the node has been decommissioned.
*/
public boolean isDecommissioned() {
return adminState == AdminStates.DECOMMISSIONED;
}
/**
* Sets the admin state to indicate that decommission is complete.
*/
public void setDecommissioned() {
adminState = AdminStates.DECOMMISSIONED;
}
/**
* Retrieves the admin state of this node.
*/
public AdminStates getAdminState() {
if (adminState == null) {
return AdminStates.NORMAL;
}
return adminState;
}
/**
   * Check if the datanode is in the stale state. If the namenode has not
   * received a heartbeat message from the datanode for more than
   * staleInterval, the datanode is treated as stale.
*
* @param staleInterval
* the time interval for marking the node as stale. If the last
* update time is beyond the given time interval, the node will be
* marked as stale.
* @return true if the node is stale
*/
public boolean isStale(long staleInterval) {
return (Time.monotonicNow() - lastUpdateMonotonic) >= staleInterval;
}
/**
* Sets the admin state of this node.
*/
protected void setAdminState(AdminStates newState) {
if (newState == AdminStates.NORMAL) {
adminState = null;
}
else {
adminState = newState;
}
}
private transient int level; //which level of the tree the node resides
private transient Node parent; //its parent
/** Return this node's parent */
@Override
public Node getParent() { return parent; }
@Override
public void setParent(Node parent) {this.parent = parent;}
/** Return this node's level in the tree.
* E.g. the root of a tree returns 0 and its children return 1
*/
@Override
public int getLevel() { return level; }
@Override
public void setLevel(int level) {this.level = level;}
@Override
public int hashCode() {
// Super implementation is sufficient
return super.hashCode();
}
@Override
public boolean equals(Object obj) {
// Sufficient to use super equality as datanodes are uniquely identified
// by DatanodeID
return (this == obj) || super.equals(obj);
}
public String getSoftwareVersion() {
return softwareVersion;
}
public void setSoftwareVersion(String softwareVersion) {
this.softwareVersion = softwareVersion;
}
}
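// Illustrative usage sketch (an assumed example, not taken from the upstream
// source): filling in a DatanodeInfo by hand and using the reporting and
// staleness helpers. The figures are arbitrary; real instances are produced
// by the NameNode from heartbeats.
class DatanodeInfoExample {
public static void main(String[] args) {
DatanodeInfo dn = new DatanodeInfo(
new DatanodeID("10.0.0.1", "dn1.example.com", "uuid-1",
50010, 50075, 50475, 50020));
dn.setCapacity(100L * 1024 * 1024 * 1024);
dn.setDfsUsed(40L * 1024 * 1024 * 1024);
dn.setRemaining(55L * 1024 * 1024 * 1024);
dn.setLastUpdate(Time.now());
dn.setLastUpdateMonotonic(Time.monotonicNow());
System.out.println(dn.getDatanodeReport());
// A node is stale once no heartbeat has been seen for staleInterval ms.
System.out.println(dn.isStale(30 * 1000)); // false right after an update
}
}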
| 16,105 | 30.518591 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.inotify.EventBatchList;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.AtMostOnce;
import org.apache.hadoop.io.retry.Idempotent;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenInfo;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
/**********************************************************************
* ClientProtocol is used by user code via the DistributedFileSystem class to
* communicate with the NameNode. User code can manipulate the directory
* namespace, as well as open/close file streams, etc.
*
**********************************************************************/
@InterfaceAudience.Private
@InterfaceStability.Evolving
@KerberosInfo(
serverPrincipal = DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)
@TokenInfo(DelegationTokenSelector.class)
public interface ClientProtocol {
/**
* Until version 69, this class ClientProtocol served as both
* the client interface to the NN AND the RPC protocol used to
* communicate with the NN.
*
* This class is used by both the DFSClient and the
* NN server side to insulate from the protocol serialization.
*
* If you are adding/changing this interface then you need to
* change both this class and ALSO related protocol buffer
* wire protocol definition in ClientNamenodeProtocol.proto.
*
* For more details on protocol buffer wire protocol, please see
* .../org/apache/hadoop/hdfs/protocolPB/overview.html
*
   * The log of historical changes can be retrieved from the svn.
* 69: Eliminate overloaded method names.
*
   * 69L is the last version id when this class was used for protocol
   * serialization. Do not update this version any further.
*/
long versionID = 69L;
///////////////////////////////////////
// File contents
///////////////////////////////////////
/**
* Get locations of the blocks of the specified file
* within the specified range.
* DataNode locations for each block are sorted by
* the proximity to the client.
* <p>
* Return {@link LocatedBlocks} which contains
* file length, blocks and their locations.
* DataNode locations for each block are sorted by
* the distance to the client's address.
* <p>
* The client will then have to contact
* one of the indicated DataNodes to obtain the actual data.
*
* @param src file name
* @param offset range start offset
* @param length range length
*
* @return file length and array of blocks with their locations
*
* @throws org.apache.hadoop.security.AccessControlException If access is
* denied
* @throws java.io.FileNotFoundException If file <code>src</code> does not
* exist
* @throws org.apache.hadoop.fs.UnresolvedLinkException If <code>src</code>
* contains a symlink
* @throws IOException If an I/O error occurred
*/
@Idempotent
LocatedBlocks getBlockLocations(String src, long offset, long length)
throws IOException;
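  // Illustrative sketch (not part of the original interface): a read typically
  // starts by fetching the block map for a range and then contacting one of
  // the returned DataNodes per block. "namenode" is a hypothetical
  // ClientProtocol proxy obtained over RPC.
  //
  //   LocatedBlocks blocks =
  //       namenode.getBlockLocations("/user/alice/data.txt", 0, Long.MAX_VALUE);
  //   for (LocatedBlock lb : blocks.getLocatedBlocks()) {
  //     DatanodeInfo closest = lb.getLocations()[0]; // sorted by proximity
  //     // ... read lb.getBlock() from "closest" via the data transfer protocol
  //   }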
/**
* Get server default values for a number of configuration params.
* @return a set of server default configuration values
* @throws IOException
*/
@Idempotent
FsServerDefaults getServerDefaults() throws IOException;
/**
* Create a new file entry in the namespace.
* <p>
* This will create an empty file specified by the source path.
* The path should reflect a full path originated at the root.
* The name-node does not have a notion of "current" directory for a client.
* <p>
   * Once created, the file is visible and available for reading by other
   * clients. However, other clients cannot {@link #delete(String, boolean)},
   * re-create or {@link #rename(String, String)} it until the file is
   * completed or the lease has expired.
* <p>
* Blocks have a maximum size. Clients that intend to create
* multi-block files must also use
* {@link #addBlock}
*
* @param src path of the file being created.
* @param masked masked permission.
* @param clientName name of the current client.
* @param flag indicates whether the file should be
* overwritten if it already exists or create if it does not exist or append.
* @param createParent create missing parent directory if true
* @param replication block replication factor.
* @param blockSize maximum block size.
* @param supportedVersions CryptoProtocolVersions supported by the client
*
* @return the status of the created file, it could be null if the server
* doesn't support returning the file status
* @throws org.apache.hadoop.security.AccessControlException If access is
* denied
   * @throws AlreadyBeingCreatedException if <code>src</code> is already being
   * created by another client
* @throws DSQuotaExceededException If file creation violates disk space
* quota restriction
* @throws org.apache.hadoop.fs.FileAlreadyExistsException If file
* <code>src</code> already exists
* @throws java.io.FileNotFoundException If parent of <code>src</code> does
* not exist and <code>createParent</code> is false
* @throws org.apache.hadoop.fs.ParentNotDirectoryException If parent of
* <code>src</code> is not a directory.
* @throws NSQuotaExceededException If file creation violates name space
* quota restriction
* @throws org.apache.hadoop.hdfs.server.namenode.SafeModeException create not
* allowed in safemode
* @throws org.apache.hadoop.fs.UnresolvedLinkException If <code>src</code>
* contains a symlink
* @throws SnapshotAccessControlException if path is in RO snapshot
* @throws IOException If an I/O error occurred
*
* RuntimeExceptions:
* @throws org.apache.hadoop.fs.InvalidPathException Path <code>src</code> is
* invalid
* <p>
* <em>Note that create with {@link CreateFlag#OVERWRITE} is idempotent.</em>
*/
@AtMostOnce
HdfsFileStatus create(String src, FsPermission masked,
String clientName, EnumSetWritable<CreateFlag> flag,
boolean createParent, short replication, long blockSize,
CryptoProtocolVersion[] supportedVersions)
throws IOException;
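  // Illustrative sketch (not part of the original interface): a minimal create
  // call as a client might issue it. "namenode" is a hypothetical
  // ClientProtocol proxy; the client name, replication and block size are
  // arbitrary.
  //
  //   HdfsFileStatus stat = namenode.create("/user/alice/out.txt",
  //       FsPermission.getFileDefault(), "DFSClient_example",
  //       new EnumSetWritable<>(EnumSet.of(CreateFlag.CREATE)),
  //       true /* createParent */, (short) 3, 128L * 1024 * 1024,
  //       CryptoProtocolVersion.supported());
  //   // stat.getFileId() is then passed to subsequent addBlock() calls.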
/**
* Append to the end of the file.
* @param src path of the file being created.
* @param clientName name of the current client.
* @param flag indicates whether the data is appended to a new block.
* @return wrapper with information about the last partial block and file
* status if any
* @throws org.apache.hadoop.security.AccessControlException if permission to
   * append file is denied by the system. As usual, on the client side the
* exception will be wrapped into
* {@link org.apache.hadoop.ipc.RemoteException}.
* Allows appending to an existing file if the server is
* configured with the parameter dfs.support.append set to true, otherwise
* throws an IOException.
*
* @throws org.apache.hadoop.security.AccessControlException If permission to
* append to file is denied
* @throws java.io.FileNotFoundException If file <code>src</code> is not found
* @throws DSQuotaExceededException If append violates disk space quota
* restriction
* @throws org.apache.hadoop.hdfs.server.namenode.SafeModeException append not
* allowed in safemode
* @throws org.apache.hadoop.fs.UnresolvedLinkException If <code>src</code>
* contains a symlink
* @throws SnapshotAccessControlException if path is in RO snapshot
* @throws IOException If an I/O error occurred.
*
* RuntimeExceptions:
* @throws UnsupportedOperationException if append is not supported
*/
@AtMostOnce
LastBlockWithStatus append(String src, String clientName,
EnumSetWritable<CreateFlag> flag) throws IOException;
/**
* Set replication for an existing file.
* <p>
* The NameNode sets replication to the new value and returns.
* The actual block replication is not expected to be performed during
* this method call. The blocks will be populated or removed in the
* background as the result of the routine block maintenance procedures.
*
* @param src file name
* @param replication new replication
*
* @return true if successful;
* false if file does not exist or is a directory
*
* @throws org.apache.hadoop.security.AccessControlException If access is
* denied
* @throws DSQuotaExceededException If replication violates disk space
* quota restriction
* @throws java.io.FileNotFoundException If file <code>src</code> is not found
* @throws org.apache.hadoop.hdfs.server.namenode.SafeModeException not
* allowed in safemode
* @throws org.apache.hadoop.fs.UnresolvedLinkException if <code>src</code>
* contains a symlink
* @throws SnapshotAccessControlException if path is in RO snapshot
* @throws IOException If an I/O error occurred
*/
@Idempotent
boolean setReplication(String src, short replication)
throws IOException;
/**
* Get all the available block storage policies.
* @return All the in-use block storage policies currently.
*/
@Idempotent
BlockStoragePolicy[] getStoragePolicies() throws IOException;
/**
* Set the storage policy for a file/directory.
* @param src Path of an existing file/directory.
* @param policyName The name of the storage policy
* @throws SnapshotAccessControlException If access is denied
* @throws org.apache.hadoop.fs.UnresolvedLinkException if <code>src</code>
* contains a symlink
* @throws java.io.FileNotFoundException If file/dir <code>src</code> is not
* found
* @throws QuotaExceededException If changes violate the quota restriction
*/
@Idempotent
void setStoragePolicy(String src, String policyName)
throws IOException;
/**
* Get the storage policy for a file/directory.
* @param path
* Path of an existing file/directory.
* @throws AccessControlException
* If access is denied
* @throws org.apache.hadoop.fs.UnresolvedLinkException
* if <code>src</code> contains a symlink
* @throws java.io.FileNotFoundException
* If file/dir <code>src</code> is not found
*/
@Idempotent
BlockStoragePolicy getStoragePolicy(String path) throws IOException;
/**
* Set permissions for an existing file/directory.
*
* @throws org.apache.hadoop.security.AccessControlException If access is
* denied
* @throws java.io.FileNotFoundException If file <code>src</code> is not found
* @throws org.apache.hadoop.hdfs.server.namenode.SafeModeException not
* allowed in safemode
* @throws org.apache.hadoop.fs.UnresolvedLinkException If <code>src</code>
* contains a symlink
* @throws SnapshotAccessControlException if path is in RO snapshot
* @throws IOException If an I/O error occurred
*/
@Idempotent
void setPermission(String src, FsPermission permission)
throws IOException;
/**
* Set Owner of a path (i.e. a file or a directory).
* The parameters username and groupname cannot both be null.
* @param src file path
* @param username If it is null, the original username remains unchanged.
* @param groupname If it is null, the original groupname remains unchanged.
*
* @throws org.apache.hadoop.security.AccessControlException If access is
* denied
* @throws java.io.FileNotFoundException If file <code>src</code> is not found
* @throws org.apache.hadoop.hdfs.server.namenode.SafeModeException not
* allowed in safemode
* @throws org.apache.hadoop.fs.UnresolvedLinkException If <code>src</code>
* contains a symlink
* @throws SnapshotAccessControlException if path is in RO snapshot
* @throws IOException If an I/O error occurred
*/
@Idempotent
void setOwner(String src, String username, String groupname)
throws IOException;
/**
* The client can give up on a block by calling abandonBlock().
* The client can then either obtain a new block, or complete or abandon the
* file.
* Any partial writes to the block will be discarded.
*
* @param b Block to abandon
* @param fileId The id of the file where the block resides. Older clients
* will pass GRANDFATHER_INODE_ID here.
* @param src The path of the file where the block resides.
* @param holder Lease holder.
*
* @throws org.apache.hadoop.security.AccessControlException If access is
* denied
* @throws java.io.FileNotFoundException file <code>src</code> is not found
* @throws org.apache.hadoop.fs.UnresolvedLinkException If <code>src</code>
* contains a symlink
* @throws IOException If an I/O error occurred
*/
@Idempotent
void abandonBlock(ExtendedBlock b, long fileId,
String src, String holder)
throws IOException;
/**
* A client that wants to write an additional block to the
* indicated filename (which must currently be open for writing)
* should call addBlock().
*
   * addBlock() allocates a new block and the set of datanodes that
   * the block data should be replicated to.
*
* addBlock() also commits the previous block by reporting
* to the name-node the actual generation stamp and the length
* of the block that the client has transmitted to data-nodes.
*
* @param src the file being created
* @param clientName the name of the client that adds the block
* @param previous previous block
* @param excludeNodes a list of nodes that should not be
* allocated for the current block
* @param fileId the id uniquely identifying a file
* @param favoredNodes the list of nodes where the client wants the blocks.
* Nodes are identified by either host name or address.
*
* @return LocatedBlock allocated block information.
*
* @throws org.apache.hadoop.security.AccessControlException If access is
* denied
* @throws java.io.FileNotFoundException If file <code>src</code> is not found
* @throws org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException
* previous blocks of the file are not replicated yet.
* Blocks cannot be added until replication completes.
* @throws org.apache.hadoop.hdfs.server.namenode.SafeModeException create not
* allowed in safemode
* @throws org.apache.hadoop.fs.UnresolvedLinkException If <code>src</code>
* contains a symlink
* @throws IOException If an I/O error occurred
*/
@Idempotent
LocatedBlock addBlock(String src, String clientName,
ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId,
String[] favoredNodes)
throws IOException;
/**
* Get a datanode for an existing pipeline.
*
* @param src the file being written
* @param fileId the ID of the file being written
* @param blk the block being written
* @param existings the existing nodes in the pipeline
* @param excludes the excluded nodes
* @param numAdditionalNodes number of additional datanodes
* @param clientName the name of the client
*
* @return the located block.
*
* @throws org.apache.hadoop.security.AccessControlException If access is
* denied
* @throws java.io.FileNotFoundException If file <code>src</code> is not found
* @throws org.apache.hadoop.hdfs.server.namenode.SafeModeException create not
* allowed in safemode
* @throws org.apache.hadoop.fs.UnresolvedLinkException If <code>src</code>
* contains a symlink
* @throws IOException If an I/O error occurred
*/
@Idempotent
LocatedBlock getAdditionalDatanode(final String src,
final long fileId, final ExtendedBlock blk,
final DatanodeInfo[] existings,
final String[] existingStorageIDs,
final DatanodeInfo[] excludes,
final int numAdditionalNodes, final String clientName
) throws IOException;
/**
* The client is done writing data to the given filename, and would
* like to complete it.
*
* The function returns whether the file has been closed successfully.
* If the function returns false, the caller should try again.
*
* close() also commits the last block of file by reporting
* to the name-node the actual generation stamp and the length
* of the block that the client has transmitted to data-nodes.
*
* A call to complete() will not return true until all the file's
* blocks have been replicated the minimum number of times. Thus,
* DataNode failures may cause a client to call complete() several
* times before succeeding.
*
* @param src the file being created
* @param clientName the name of the client that adds the block
* @param last the last block info
* @param fileId the id uniquely identifying a file
*
* @return true if all file blocks are minimally replicated or false otherwise
*
* @throws org.apache.hadoop.security.AccessControlException If access is
* denied
* @throws java.io.FileNotFoundException If file <code>src</code> is not found
* @throws org.apache.hadoop.hdfs.server.namenode.SafeModeException create not
* allowed in safemode
* @throws org.apache.hadoop.fs.UnresolvedLinkException If <code>src</code>
* contains a symlink
* @throws IOException If an I/O error occurred
*/
@Idempotent
boolean complete(String src, String clientName,
ExtendedBlock last, long fileId)
throws IOException;
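  /*
   * Usage sketch (illustrative only, not part of this interface): the retry
   * loop described above, assuming "namenode" is a ClientProtocol proxy and
   * "lastBlock"/"fileId" come from the preceding addBlock() calls.
   *
   *   boolean closed = namenode.complete(src, clientName, lastBlock, fileId);
   *   while (!closed) {
   *     Thread.sleep(400);   // back off while blocks reach minimal replication
   *     closed = namenode.complete(src, clientName, lastBlock, fileId);
   *   }
   */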
/**
* The client wants to report corrupted blocks (blocks with specified
* locations on datanodes).
* @param blocks Array of located blocks to report
*/
@Idempotent
void reportBadBlocks(LocatedBlock[] blocks) throws IOException;
///////////////////////////////////////
// Namespace management
///////////////////////////////////////
/**
* Rename an item in the file system namespace.
* @param src existing file or directory name.
* @param dst new name.
* @return true if successful, or false if the old name does not exist
* or if the new name already belongs to the namespace.
*
* @throws SnapshotAccessControlException if path is in RO snapshot
* @throws IOException an I/O error occurred
*/
@AtMostOnce
boolean rename(String src, String dst)
throws IOException;
/**
* Moves blocks from srcs to trg and delete srcs.
*
* @param trg existing file
* @param srcs - list of existing files (same block size, same replication)
* @throws IOException if some arguments are invalid
* @throws org.apache.hadoop.fs.UnresolvedLinkException if <code>trg</code> or
* <code>srcs</code> contains a symlink
* @throws SnapshotAccessControlException if path is in RO snapshot
*/
@AtMostOnce
void concat(String trg, String[] srcs)
throws IOException;
/**
* Rename src to dst.
* <ul>
* <li>Fails if src is a file and dst is a directory.
* <li>Fails if src is a directory and dst is a file.
* <li>Fails if the parent of dst does not exist or is a file.
* </ul>
* <p>
* Without OVERWRITE option, rename fails if the dst already exists.
* With OVERWRITE option, rename overwrites the dst, if it is a file
* or an empty directory. Rename fails if dst is a non-empty directory.
* <p>
* This implementation of rename is atomic.
* <p>
* @param src existing file or directory name.
* @param dst new name.
* @param options Rename options
*
* @throws org.apache.hadoop.security.AccessControlException If access is
* denied
* @throws DSQuotaExceededException If rename violates disk space
* quota restriction
* @throws org.apache.hadoop.fs.FileAlreadyExistsException If <code>dst</code>
* already exists and <code>options</code> has
* {@link org.apache.hadoop.fs.Options.Rename#OVERWRITE} option
* false.
* @throws java.io.FileNotFoundException If <code>src</code> does not exist
* @throws NSQuotaExceededException If rename violates namespace
* quota restriction
* @throws org.apache.hadoop.fs.ParentNotDirectoryException If parent of
* <code>dst</code> is not a directory
* @throws org.apache.hadoop.hdfs.server.namenode.SafeModeException rename not
* allowed in safemode
* @throws org.apache.hadoop.fs.UnresolvedLinkException If <code>src</code> or
* <code>dst</code> contains a symlink
* @throws SnapshotAccessControlException if path is in RO snapshot
* @throws IOException If an I/O error occurred
*/
@AtMostOnce
void rename2(String src, String dst, Options.Rename... options)
throws IOException;
/**
* Truncate file src to new size.
* <ul>
* <li>Fails if src is a directory.
* <li>Fails if src does not exist.
* <li>Fails if src is not closed.
* <li>Fails if new size is greater than current size.
* </ul>
* <p>
* This implementation of truncate is purely a namespace operation if truncate
* occurs at a block boundary. Requires DataNode block recovery otherwise.
* <p>
* @param src existing file
* @param newLength the target size
*
* @return true if client does not need to wait for block recovery,
* false if client needs to wait for block recovery.
*
* @throws org.apache.hadoop.security.AccessControlException If access is
* denied
* @throws java.io.FileNotFoundException If file <code>src</code> is not found
* @throws org.apache.hadoop.hdfs.server.namenode.SafeModeException truncate
* not allowed in safemode
* @throws org.apache.hadoop.fs.UnresolvedLinkException If <code>src</code>
* contains a symlink
* @throws SnapshotAccessControlException if path is in RO snapshot
* @throws IOException If an I/O error occurred
*/
@Idempotent
boolean truncate(String src, long newLength, String clientName)
throws IOException;
/**
* Delete the given file or directory from the file system.
* <p>
   * Same as delete but provides a way to avoid accidentally
   * deleting non-empty directories programmatically.
   * @param src existing name
   * @param recursive if true deletes a non-empty directory recursively,
   *          else throws an exception.
* @return true only if the existing file or directory was actually removed
* from the file system.
*
* @throws org.apache.hadoop.security.AccessControlException If access is
* denied
* @throws java.io.FileNotFoundException If file <code>src</code> is not found
* @throws org.apache.hadoop.hdfs.server.namenode.SafeModeException create not
* allowed in safemode
* @throws org.apache.hadoop.fs.UnresolvedLinkException If <code>src</code>
* contains a symlink
* @throws SnapshotAccessControlException if path is in RO snapshot
* @throws IOException If an I/O error occurred
*/
@AtMostOnce
boolean delete(String src, boolean recursive)
throws IOException;
/**
* Create a directory (or hierarchy of directories) with the given
* name and permission.
*
* @param src The path of the directory being created
* @param masked The masked permission of the directory being created
* @param createParent create missing parent directory if true
*
   * @return True if the operation succeeds.
*
* @throws org.apache.hadoop.security.AccessControlException If access is
* denied
* @throws org.apache.hadoop.fs.FileAlreadyExistsException If <code>src</code>
* already exists
* @throws java.io.FileNotFoundException If parent of <code>src</code> does
* not exist and <code>createParent</code> is false
* @throws NSQuotaExceededException If file creation violates quota
* restriction
* @throws org.apache.hadoop.fs.ParentNotDirectoryException If parent of
* <code>src</code> is not a directory
* @throws org.apache.hadoop.hdfs.server.namenode.SafeModeException create not
* allowed in safemode
* @throws org.apache.hadoop.fs.UnresolvedLinkException If <code>src</code>
* contains a symlink
* @throws SnapshotAccessControlException if path is in RO snapshot
* @throws IOException If an I/O error occurred.
*
   * RuntimeExceptions:
* @throws org.apache.hadoop.fs.InvalidPathException If <code>src</code> is
* invalid
*/
@Idempotent
boolean mkdirs(String src, FsPermission masked, boolean createParent)
throws IOException;
/**
* Get a partial listing of the indicated directory.
*
* @param src the directory name
* @param startAfter the name to start listing after encoded in java UTF8
* @param needLocation if the FileStatus should contain block locations
*
* @return a partial listing starting after startAfter
*
* @throws org.apache.hadoop.security.AccessControlException permission denied
* @throws java.io.FileNotFoundException file <code>src</code> is not found
* @throws org.apache.hadoop.fs.UnresolvedLinkException If <code>src</code>
* contains a symlink
* @throws IOException If an I/O error occurred
*/
@Idempotent
DirectoryListing getListing(String src, byte[] startAfter,
boolean needLocation) throws IOException;
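  /*
   * Usage sketch (illustrative only): paging through a large directory with
   * the startAfter cursor. Assumes "namenode" is a ClientProtocol proxy and
   * that DirectoryListing exposes hasMore()/getLastName() as in HDFS.
   *
   *   DirectoryListing batch =
   *       namenode.getListing(src, HdfsFileStatus.EMPTY_NAME, false);
   *   while (batch != null) {
   *     // process batch.getPartialListing() ...
   *     if (!batch.hasMore()) {
   *       break;
   *     }
   *     batch = namenode.getListing(src, batch.getLastName(), false);
   *   }
   */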
/**
* Get listing of all the snapshottable directories.
*
   * @return Information about all the current snapshottable directories.
* @throws IOException If an I/O error occurred
*/
@Idempotent
SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws IOException;
///////////////////////////////////////
// System issues and management
///////////////////////////////////////
/**
* Client programs can cause stateful changes in the NameNode
* that affect other clients. A client may obtain a file and
* neither abandon nor complete it. A client might hold a series
* of locks that prevent other clients from proceeding.
* Clearly, it would be bad if a client held a bunch of locks
* that it never gave up. This can happen easily if the client
* dies unexpectedly.
* <p>
* So, the NameNode will revoke the locks and live file-creates
* for clients that it thinks have died. A client tells the
* NameNode that it is still alive by periodically calling
* renewLease(). If a certain amount of time passes since
* the last call to renewLease(), the NameNode assumes the
* client has died.
*
* @throws org.apache.hadoop.security.AccessControlException permission denied
* @throws IOException If an I/O error occurred
*/
@Idempotent
void renewLease(String clientName) throws IOException;
/**
* Start lease recovery.
* Lightweight NameNode operation to trigger lease recovery
*
* @param src path of the file to start lease recovery
* @param clientName name of the current client
* @return true if the file is already closed
* @throws IOException
*/
@Idempotent
boolean recoverLease(String src, String clientName) throws IOException;
int GET_STATS_CAPACITY_IDX = 0;
int GET_STATS_USED_IDX = 1;
int GET_STATS_REMAINING_IDX = 2;
int GET_STATS_UNDER_REPLICATED_IDX = 3;
int GET_STATS_CORRUPT_BLOCKS_IDX = 4;
int GET_STATS_MISSING_BLOCKS_IDX = 5;
int GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX = 6;
/**
* Get a set of statistics about the filesystem.
* Right now, only seven values are returned.
* <ul>
* <li> [0] contains the total storage capacity of the system, in bytes.</li>
* <li> [1] contains the total used space of the system, in bytes.</li>
* <li> [2] contains the available storage of the system, in bytes.</li>
* <li> [3] contains number of under replicated blocks in the system.</li>
* <li> [4] contains number of blocks with a corrupt replica. </li>
* <li> [5] contains number of blocks without any good replicas left. </li>
* <li> [6] contains number of blocks which have replication factor
* 1 and have lost the only replica. </li>
* </ul>
* Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of
* actual numbers to index into the array.
*/
@Idempotent
long[] getStats() throws IOException;
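  /*
   * Usage sketch (illustrative only), assuming "namenode" is a ClientProtocol
   * proxy; the constants above are the supported indices into the array.
   *
   *   long[] stats = namenode.getStats();
   *   long capacity  = stats[GET_STATS_CAPACITY_IDX];
   *   long remaining = stats[GET_STATS_REMAINING_IDX];
   *   long missing   = stats[GET_STATS_MISSING_BLOCKS_IDX];
   */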
/**
* Get a report on the system's current datanodes.
* One DatanodeInfo object is returned for each DataNode.
   * Returns live datanodes if type is LIVE, dead datanodes if type is DEAD,
   * and all datanodes if type is ALL.
*/
@Idempotent
DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type)
throws IOException;
/**
* Get a report on the current datanode storages.
*/
@Idempotent
DatanodeStorageReport[] getDatanodeStorageReport(
HdfsConstants.DatanodeReportType type) throws IOException;
/**
* Get the block size for the given file.
* @param filename The name of the file
* @return The number of bytes in each block
* @throws IOException
* @throws org.apache.hadoop.fs.UnresolvedLinkException if the path contains
* a symlink.
*/
@Idempotent
long getPreferredBlockSize(String filename)
throws IOException;
/**
* Enter, leave or get safe mode.
* <p>
* Safe mode is a name node state when it
* <ol><li>does not accept changes to name space (read-only), and</li>
* <li>does not replicate or delete blocks.</li></ol>
*
* <p>
* Safe mode is entered automatically at name node startup.
* Safe mode can also be entered manually using
* {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean)
* setSafeMode(SafeModeAction.SAFEMODE_ENTER,false)}.
* <p>
* At startup the name node accepts data node reports collecting
* information about block locations.
   * In order to leave safe mode it needs to collect a configurable
   * percentage of blocks, called the threshold, that satisfy the minimal
   * replication condition.
* The minimal replication condition is that each block must have at least
* <tt>dfs.namenode.replication.min</tt> replicas.
* When the threshold is reached the name node extends safe mode
* for a configurable amount of time
   * to let the remaining data nodes check in before it
* will start replicating missing blocks.
* Then the name node leaves safe mode.
* <p>
* If safe mode is turned on manually using
* {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean)
* setSafeMode(SafeModeAction.SAFEMODE_ENTER,false)}
* then the name node stays in safe mode until it is manually turned off
* using {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean)
* setSafeMode(SafeModeAction.SAFEMODE_LEAVE,false)}.
* Current state of the name node can be verified using
* {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean)
* setSafeMode(SafeModeAction.SAFEMODE_GET,false)}
* <h4>Configuration parameters:</h4>
* <tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
* <tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
* <tt>dfs.namenode.replication.min</tt> is the minimal replication parameter.
*
* <h4>Special cases:</h4>
* The name node does not enter safe mode at startup if the threshold is
* set to 0 or if the name space is empty.<br>
* If the threshold is set to 1 then all blocks need to have at least
* minimal replication.<br>
* If the threshold value is greater than 1 then the name node will not be
* able to turn off safe mode automatically.<br>
* Safe mode can always be turned off manually.
*
* @param action <ul> <li>0 leave safe mode;</li>
* <li>1 enter safe mode;</li>
* <li>2 get safe mode state.</li></ul>
* @param isChecked If true then action will be done only in ActiveNN.
*
* @return <ul><li>0 if the safe mode is OFF or</li>
* <li>1 if the safe mode is ON.</li></ul>
*
* @throws IOException
*/
@Idempotent
boolean setSafeMode(HdfsConstants.SafeModeAction action, boolean isChecked)
throws IOException;
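  /*
   * Usage sketch (illustrative only), assuming "namenode" is a ClientProtocol
   * proxy; it mirrors the manual enter/get/leave cycle described above.
   *
   *   boolean on = namenode.setSafeMode(
   *       HdfsConstants.SafeModeAction.SAFEMODE_GET, false);     // query state
   *   if (!on) {
   *     namenode.setSafeMode(
   *         HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false); // enter manually
   *     // ... perform maintenance ...
   *     namenode.setSafeMode(
   *         HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false); // leave manually
   *   }
   */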
/**
* Save namespace image.
* <p>
   * Saves the current namespace into the storage directories and resets
   * the edits log.
* Requires superuser privilege and safe mode.
*
* @throws AccessControlException if the superuser privilege is violated.
* @throws IOException if image creation failed.
*/
@AtMostOnce
void saveNamespace() throws IOException;
/**
* Roll the edit log.
* Requires superuser privileges.
*
* @throws org.apache.hadoop.security.AccessControlException if the superuser
* privilege is violated
* @throws IOException if log roll fails
* @return the txid of the new segment
*/
@Idempotent
long rollEdits() throws IOException;
/**
   * Enable/Disable restoring failed storage.
   * <p>
   * Sets a flag to enable restore of failed storage replicas.
*
* @throws org.apache.hadoop.security.AccessControlException if the superuser
* privilege is violated.
*/
@Idempotent
boolean restoreFailedStorage(String arg) throws IOException;
/**
* Tells the namenode to reread the hosts and exclude files.
* @throws IOException
*/
@Idempotent
void refreshNodes() throws IOException;
/**
* Finalize previous upgrade.
* Remove file system state saved during the upgrade.
* The upgrade will become irreversible.
*
* @throws IOException
*/
@Idempotent
void finalizeUpgrade() throws IOException;
/**
* Rolling upgrade operations.
* @param action either query, prepare or finalize.
* @return rolling upgrade information. On query, if no upgrade is in
* progress, returns null.
*/
@Idempotent
RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
throws IOException;
/**
* @return CorruptFileBlocks, containing a list of corrupt files (with
* duplicates if there is more than one corrupt block in a file)
* and a cookie
* @throws IOException
*
* Each call returns a subset of the corrupt files in the system. To obtain
* all corrupt files, call this method repeatedly and each time pass in the
* cookie returned from the previous call.
*/
@Idempotent
CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
throws IOException;
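  /*
   * Usage sketch (illustrative only): fetching all corrupt files by passing
   * the returned cookie back in, as described above. Assumes "namenode" is a
   * ClientProtocol proxy and CorruptFileBlocks exposes getFiles()/getCookie().
   *
   *   String cookie = null;
   *   CorruptFileBlocks batch;
   *   do {
   *     batch = namenode.listCorruptFileBlocks("/", cookie);
   *     cookie = batch.getCookie();
   *     // process batch.getFiles() ...
   *   } while (batch.getFiles().length > 0);
   */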
/**
   * Dumps namenode data structures into the specified file. If the file
   * already exists, the output is appended to it.
*
* @throws IOException
*/
@Idempotent
void metaSave(String filename) throws IOException;
/**
* Tell all datanodes to use a new, non-persistent bandwidth value for
* dfs.balance.bandwidthPerSec.
*
   * @param bandwidth Balancer bandwidth in bytes per second for this datanode.
* @throws IOException
*/
@Idempotent
void setBalancerBandwidth(long bandwidth) throws IOException;
/**
* Get the file info for a specific file or directory.
* @param src The string representation of the path to the file
*
* @return object containing information regarding the file
* or null if file not found
* @throws org.apache.hadoop.security.AccessControlException permission denied
* @throws java.io.FileNotFoundException file <code>src</code> is not found
* @throws org.apache.hadoop.fs.UnresolvedLinkException if the path contains
* a symlink.
* @throws IOException If an I/O error occurred
*/
@Idempotent
HdfsFileStatus getFileInfo(String src) throws IOException;
/**
* Get the close status of a file.
* @param src The string representation of the path to the file
*
* @return return true if file is closed
* @throws org.apache.hadoop.security.AccessControlException permission denied
* @throws java.io.FileNotFoundException file <code>src</code> is not found
* @throws org.apache.hadoop.fs.UnresolvedLinkException if the path contains
* a symlink.
* @throws IOException If an I/O error occurred
*/
@Idempotent
boolean isFileClosed(String src) throws IOException;
/**
* Get the file info for a specific file or directory. If the path
* refers to a symlink then the FileStatus of the symlink is returned.
* @param src The string representation of the path to the file
*
* @return object containing information regarding the file
* or null if file not found
*
* @throws org.apache.hadoop.security.AccessControlException permission denied
* @throws org.apache.hadoop.fs.UnresolvedLinkException if <code>src</code>
* contains a symlink
* @throws IOException If an I/O error occurred
*/
@Idempotent
HdfsFileStatus getFileLinkInfo(String src) throws IOException;
/**
* Get {@link ContentSummary} rooted at the specified directory.
* @param path The string representation of the path
*
* @throws org.apache.hadoop.security.AccessControlException permission denied
* @throws java.io.FileNotFoundException file <code>path</code> is not found
* @throws org.apache.hadoop.fs.UnresolvedLinkException if <code>path</code>
* contains a symlink.
* @throws IOException If an I/O error occurred
*/
@Idempotent
ContentSummary getContentSummary(String path) throws IOException;
/**
* Set the quota for a directory.
* @param path The string representation of the path to the directory
* @param namespaceQuota Limit on the number of names in the tree rooted
* at the directory
   * @param storagespaceQuota Limit on storage space occupied by all the files
   *                          under this directory.
* @param type StorageType that the space quota is intended to be set on.
* It may be null when called by traditional space/namespace
   *                    quota. When type is not null, the storagespaceQuota
* parameter is for type specified and namespaceQuota must be
* {@link HdfsConstants#QUOTA_DONT_SET}.
*
* <br><br>
*
   * The quota can have three types of values: (1) 0 or more will set
* the quota to that value, (2) {@link HdfsConstants#QUOTA_DONT_SET} implies
* the quota will not be changed, and (3) {@link HdfsConstants#QUOTA_RESET}
* implies the quota will be reset. Any other value is a runtime error.
*
* @throws org.apache.hadoop.security.AccessControlException permission denied
* @throws java.io.FileNotFoundException file <code>path</code> is not found
* @throws QuotaExceededException if the directory size
* is greater than the given quota
* @throws org.apache.hadoop.fs.UnresolvedLinkException if the
* <code>path</code> contains a symlink.
* @throws SnapshotAccessControlException if path is in RO snapshot
* @throws IOException If an I/O error occurred
*/
@Idempotent
void setQuota(String path, long namespaceQuota, long storagespaceQuota,
StorageType type) throws IOException;
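  /*
   * Usage sketch (illustrative only; "/user/alice" is a made-up path and
   * "namenode" an assumed ClientProtocol proxy). It shows the value kinds
   * described above for the traditional (type == null) quota.
   *
   *   namenode.setQuota("/user/alice", 100000L,
   *       HdfsConstants.QUOTA_DONT_SET, null);   // set only the name quota
   *   namenode.setQuota("/user/alice", HdfsConstants.QUOTA_RESET,
   *       HdfsConstants.QUOTA_DONT_SET, null);   // clear the name quota again
   */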
/**
* Write all metadata for this file into persistent storage.
* The file must be currently open for writing.
* @param src The string representation of the path
* @param inodeId The inode ID, or GRANDFATHER_INODE_ID if the client is
* too old to support fsync with inode IDs.
* @param client The string representation of the client
* @param lastBlockLength The length of the last block (under construction)
* to be reported to NameNode
* @throws org.apache.hadoop.security.AccessControlException permission denied
* @throws java.io.FileNotFoundException file <code>src</code> is not found
* @throws org.apache.hadoop.fs.UnresolvedLinkException if <code>src</code>
* contains a symlink.
* @throws IOException If an I/O error occurred
*/
@Idempotent
void fsync(String src, long inodeId, String client, long lastBlockLength)
throws IOException;
/**
* Sets the modification and access time of the file to the specified time.
* @param src The string representation of the path
* @param mtime The number of milliseconds since Jan 1, 1970.
* Setting mtime to -1 means that modification time should not
* be set by this call.
* @param atime The number of milliseconds since Jan 1, 1970.
* Setting atime to -1 means that access time should not be set
* by this call.
*
* @throws org.apache.hadoop.security.AccessControlException permission denied
* @throws java.io.FileNotFoundException file <code>src</code> is not found
* @throws org.apache.hadoop.fs.UnresolvedLinkException if <code>src</code>
* contains a symlink.
* @throws SnapshotAccessControlException if path is in RO snapshot
* @throws IOException If an I/O error occurred
*/
@Idempotent
void setTimes(String src, long mtime, long atime) throws IOException;
/**
* Create symlink to a file or directory.
* @param target The path of the destination that the
* link points to.
* @param link The path of the link being created.
* @param dirPerm permissions to use when creating parent directories
   * @param createParent - if true then missing parent dirs are created;
   *                       if false then the parent must exist
*
* @throws org.apache.hadoop.security.AccessControlException permission denied
* @throws org.apache.hadoop.fs.FileAlreadyExistsException If file
* <code>link</code> already exists
* @throws java.io.FileNotFoundException If parent of <code>link</code> does
* not exist and <code>createParent</code> is false
* @throws org.apache.hadoop.fs.ParentNotDirectoryException If parent of
* <code>link</code> is not a directory.
* @throws org.apache.hadoop.fs.UnresolvedLinkException if <code>link</code>
* contains a symlink.
* @throws SnapshotAccessControlException if path is in RO snapshot
* @throws IOException If an I/O error occurred
*/
@AtMostOnce
void createSymlink(String target, String link, FsPermission dirPerm,
boolean createParent) throws IOException;
/**
* Return the target of the given symlink. If there is an intermediate
   * symlink in the path (i.e. a symlink leading up to the final path component)
* then the given path is returned with this symlink resolved.
*
* @param path The path with a link that needs resolution.
* @return The path after resolving the first symbolic link in the path.
* @throws org.apache.hadoop.security.AccessControlException permission denied
* @throws java.io.FileNotFoundException If <code>path</code> does not exist
* @throws IOException If the given path does not refer to a symlink
* or an I/O error occurred
*/
@Idempotent
String getLinkTarget(String path) throws IOException;
/**
* Get a new generation stamp together with an access token for
* a block under construction
*
* This method is called only when a client needs to recover a failed
* pipeline or set up a pipeline for appending to a block.
*
* @param block a block
* @param clientName the name of the client
* @return a located block with a new generation stamp and an access token
* @throws IOException if any error occurs
*/
@Idempotent
LocatedBlock updateBlockForPipeline(ExtendedBlock block,
String clientName) throws IOException;
/**
* Update a pipeline for a block under construction.
*
* @param clientName the name of the client
* @param oldBlock the old block
* @param newBlock the new block containing new generation stamp and length
* @param newNodes datanodes in the pipeline
* @throws IOException if any error occurs
*/
@AtMostOnce
void updatePipeline(String clientName, ExtendedBlock oldBlock,
ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs)
throws IOException;
/**
* Get a valid Delegation Token.
*
* @param renewer the designated renewer for the token
* @return Token<DelegationTokenIdentifier>
* @throws IOException
*/
@Idempotent
Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
throws IOException;
/**
* Renew an existing delegation token.
*
* @param token delegation token obtained earlier
* @return the new expiration time
* @throws IOException
*/
@Idempotent
long renewDelegationToken(Token<DelegationTokenIdentifier> token)
throws IOException;
/**
* Cancel an existing delegation token.
*
* @param token delegation token
* @throws IOException
*/
@Idempotent
void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
throws IOException;
/**
* @return encryption key so a client can encrypt data sent via the
* DataTransferProtocol to/from DataNodes.
* @throws IOException
*/
@Idempotent
DataEncryptionKey getDataEncryptionKey() throws IOException;
/**
* Create a snapshot.
* @param snapshotRoot the path that is being snapshotted
* @param snapshotName name of the snapshot created
* @return the snapshot path.
* @throws IOException
*/
@AtMostOnce
String createSnapshot(String snapshotRoot, String snapshotName)
throws IOException;
/**
* Delete a specific snapshot of a snapshottable directory.
* @param snapshotRoot The snapshottable directory
* @param snapshotName Name of the snapshot for the snapshottable directory
* @throws IOException
*/
@AtMostOnce
void deleteSnapshot(String snapshotRoot, String snapshotName)
throws IOException;
/**
* Rename a snapshot.
* @param snapshotRoot the directory path where the snapshot was taken
* @param snapshotOldName old name of the snapshot
* @param snapshotNewName new name of the snapshot
* @throws IOException
*/
@AtMostOnce
void renameSnapshot(String snapshotRoot, String snapshotOldName,
String snapshotNewName) throws IOException;
/**
* Allow snapshot on a directory.
* @param snapshotRoot the directory to be snapped
* @throws IOException on error
*/
@Idempotent
void allowSnapshot(String snapshotRoot)
throws IOException;
/**
* Disallow snapshot on a directory.
* @param snapshotRoot the directory to disallow snapshot
* @throws IOException on error
*/
@Idempotent
void disallowSnapshot(String snapshotRoot)
throws IOException;
/**
* Get the difference between two snapshots, or between a snapshot and the
* current tree of a directory.
*
* @param snapshotRoot
* full path of the directory where snapshots are taken
* @param fromSnapshot
* snapshot name of the from point. Null indicates the current
* tree
* @param toSnapshot
* snapshot name of the to point. Null indicates the current
* tree.
* @return The difference report represented as a {@link SnapshotDiffReport}.
* @throws IOException on error
*/
@Idempotent
SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
String fromSnapshot, String toSnapshot) throws IOException;
/**
* Add a CacheDirective to the CacheManager.
*
* @param directive A CacheDirectiveInfo to be added
* @param flags {@link CacheFlag}s to use for this operation.
* @return A CacheDirectiveInfo associated with the added directive
* @throws IOException if the directive could not be added
*/
@AtMostOnce
long addCacheDirective(CacheDirectiveInfo directive,
EnumSet<CacheFlag> flags) throws IOException;
/**
* Modify a CacheDirective in the CacheManager.
*
* @param flags {@link CacheFlag}s to use for this operation.
* @throws IOException if the directive could not be modified
*/
@AtMostOnce
void modifyCacheDirective(CacheDirectiveInfo directive,
EnumSet<CacheFlag> flags) throws IOException;
/**
* Remove a CacheDirectiveInfo from the CacheManager.
*
* @param id of a CacheDirectiveInfo
* @throws IOException if the cache directive could not be removed
*/
@AtMostOnce
void removeCacheDirective(long id) throws IOException;
/**
* List the set of cached paths of a cache pool. Incrementally fetches results
* from the server.
*
* @param prevId The last listed entry ID, or -1 if this is the first call to
* listCacheDirectives.
* @param filter Parameters to use to filter the list results,
* or null to display all directives visible to us.
* @return A batch of CacheDirectiveEntry objects.
*/
@Idempotent
BatchedEntries<CacheDirectiveEntry> listCacheDirectives(
long prevId, CacheDirectiveInfo filter) throws IOException;
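  /*
   * Usage sketch (illustrative only): incremental listing driven by the last
   * seen directive ID. Assumes "namenode" is a ClientProtocol proxy and that
   * BatchedEntries exposes size()/get(int)/hasMore() as in HDFS.
   *
   *   long prevId = -1;
   *   BatchedEntries<CacheDirectiveEntry> entries;
   *   do {
   *     entries = namenode.listCacheDirectives(prevId, null);
   *     for (int i = 0; i < entries.size(); i++) {
   *       prevId = entries.get(i).getInfo().getId();
   *       // process the directive ...
   *     }
   *   } while (entries.hasMore());
   */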
/**
* Add a new cache pool.
*
* @param info Description of the new cache pool
* @throws IOException If the request could not be completed.
*/
@AtMostOnce
void addCachePool(CachePoolInfo info) throws IOException;
/**
* Modify an existing cache pool.
*
* @param req
* The request to modify a cache pool.
* @throws IOException
* If the request could not be completed.
*/
@AtMostOnce
void modifyCachePool(CachePoolInfo req) throws IOException;
/**
* Remove a cache pool.
*
* @param pool name of the cache pool to remove.
* @throws IOException if the cache pool did not exist, or could not be
* removed.
*/
@AtMostOnce
void removeCachePool(String pool) throws IOException;
/**
* List the set of cache pools. Incrementally fetches results from the server.
*
* @param prevPool name of the last pool listed, or the empty string if this
* is the first invocation of listCachePools
* @return A batch of CachePoolEntry objects.
*/
@Idempotent
BatchedEntries<CachePoolEntry> listCachePools(String prevPool)
throws IOException;
/**
* Modifies ACL entries of files and directories. This method can add new ACL
* entries or modify the permissions on existing ACL entries. All existing
* ACL entries that are not specified in this call are retained without
* changes. (Modifications are merged into the current ACL.)
*/
@Idempotent
void modifyAclEntries(String src, List<AclEntry> aclSpec)
throws IOException;
/**
* Removes ACL entries from files and directories. Other ACL entries are
* retained.
*/
@Idempotent
void removeAclEntries(String src, List<AclEntry> aclSpec)
throws IOException;
/**
* Removes all default ACL entries from files and directories.
*/
@Idempotent
void removeDefaultAcl(String src) throws IOException;
/**
* Removes all but the base ACL entries of files and directories. The entries
* for user, group, and others are retained for compatibility with permission
* bits.
*/
@Idempotent
void removeAcl(String src) throws IOException;
/**
* Fully replaces ACL of files and directories, discarding all existing
* entries.
*/
@Idempotent
void setAcl(String src, List<AclEntry> aclSpec) throws IOException;
/**
* Gets the ACLs of files and directories.
*/
@Idempotent
AclStatus getAclStatus(String src) throws IOException;
/**
* Create an encryption zone.
*/
@AtMostOnce
void createEncryptionZone(String src, String keyName)
throws IOException;
/**
* Get the encryption zone for a path.
*/
@Idempotent
EncryptionZone getEZForPath(String src)
throws IOException;
/**
   * Used to implement cursor-based batched listing of {@link EncryptionZone}s.
*
* @param prevId ID of the last item in the previous batch. If there is no
* previous batch, a negative value can be used.
* @return Batch of encryption zones.
*/
@Idempotent
BatchedEntries<EncryptionZone> listEncryptionZones(
long prevId) throws IOException;
/**
* Set xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param src file or directory
* @param xAttr <code>XAttr</code> to set
* @param flag set flag
* @throws IOException
*/
@AtMostOnce
void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
throws IOException;
/**
* Get xattrs of a file or directory. Values in xAttrs parameter are ignored.
* If xAttrs is null or empty, this is the same as getting all xattrs of the
* file or directory. Only those xattrs for which the logged-in user has
* permissions to view are returned.
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param src file or directory
* @param xAttrs xAttrs to get
* @return List<XAttr> <code>XAttr</code> list
* @throws IOException
*/
@Idempotent
List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
throws IOException;
/**
   * List the xattr names for a file or directory.
   * Only the xattr names that the logged-in user has permission to
   * access will be returned.
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param src file or directory
* @return List<XAttr> <code>XAttr</code> list
* @throws IOException
*/
@Idempotent
List<XAttr> listXAttrs(String src)
throws IOException;
/**
   * Remove xattr of a file or directory. The value in the xAttr parameter
   * is ignored.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param src file or directory
* @param xAttr <code>XAttr</code> to remove
* @throws IOException
*/
@AtMostOnce
void removeXAttr(String src, XAttr xAttr) throws IOException;
/**
* Checks if the user can access a path. The mode specifies which access
* checks to perform. If the requested permissions are granted, then the
* method returns normally. If access is denied, then the method throws an
* {@link org.apache.hadoop.security.AccessControlException}.
* In general, applications should avoid using this method, due to the risk of
* time-of-check/time-of-use race conditions. The permissions on a file may
* change immediately after the access call returns.
*
* @param path Path to check
* @param mode type of access to check
* @throws org.apache.hadoop.security.AccessControlException if access is
* denied
* @throws java.io.FileNotFoundException if the path does not exist
* @throws IOException see specific implementation
*/
@Idempotent
void checkAccess(String path, FsAction mode) throws IOException;
/**
* Get the highest txid the NameNode knows has been written to the edit
* log, or -1 if the NameNode's edit log is not yet open for write. Used as
* the starting point for the inotify event stream.
*/
@Idempotent
long getCurrentEditLogTxid() throws IOException;
/**
* Get an ordered list of batches of events corresponding to the edit log
* transactions for txids equal to or greater than txid.
*/
@Idempotent
EventBatchList getEditsFromTxid(long txid) throws IOException;
}
| 57,568 | 37.845479 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.annotations.VisibleForTesting;
/**
* This class represents the primary identifier for a Datanode.
* Datanodes are identified by how they can be contacted (hostname
* and ports) and their storage ID, a unique number that associates
 * the Datanode's blocks with a particular Datanode.
*
* {@link DatanodeInfo#getName()} should be used to get the network
* location (for topology) of a datanode, instead of using
* {@link DatanodeID#getXferAddr()} here. Helpers are defined below
* for each context in which a DatanodeID is used.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DatanodeID implements Comparable<DatanodeID> {
public static final DatanodeID[] EMPTY_ARRAY = {};
private String ipAddr; // IP address
private String hostName; // hostname claimed by datanode
private String peerHostName; // hostname from the actual connection
private int xferPort; // data streaming port
private int infoPort; // info server port
  private int infoSecurePort; // secure info server port
private int ipcPort; // IPC server port
private String xferAddr;
/**
* UUID identifying a given datanode. For upgraded Datanodes this is the
* same as the StorageID that was previously used by this Datanode.
* For newly formatted Datanodes it is a UUID.
*/
private final String datanodeUuid;
public DatanodeID(DatanodeID from) {
this(from.getDatanodeUuid(), from);
}
@VisibleForTesting
public DatanodeID(String datanodeUuid, DatanodeID from) {
this(from.getIpAddr(),
from.getHostName(),
datanodeUuid,
from.getXferPort(),
from.getInfoPort(),
from.getInfoSecurePort(),
from.getIpcPort());
this.peerHostName = from.getPeerHostName();
}
/**
* Create a DatanodeID
* @param ipAddr IP
* @param hostName hostname
* @param datanodeUuid data node ID, UUID for new Datanodes, may be the
* storage ID for pre-UUID datanodes. NULL if unknown
* e.g. if this is a new datanode. A new UUID will
* be assigned by the namenode.
* @param xferPort data transfer port
* @param infoPort info server port
* @param ipcPort ipc server port
*/
public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
setIpAndXferPort(ipAddr, xferPort);
this.hostName = hostName;
this.datanodeUuid = checkDatanodeUuid(datanodeUuid);
this.infoPort = infoPort;
this.infoSecurePort = infoSecurePort;
this.ipcPort = ipcPort;
}
public void setIpAddr(String ipAddr) {
//updated during registration, preserve former xferPort
setIpAndXferPort(ipAddr, xferPort);
}
private void setIpAndXferPort(String ipAddr, int xferPort) {
// build xferAddr string to reduce cost of frequent use
this.ipAddr = ipAddr;
this.xferPort = xferPort;
this.xferAddr = ipAddr + ":" + xferPort;
}
public void setPeerHostName(String peerHostName) {
this.peerHostName = peerHostName;
}
/**
* @return data node ID.
*/
public String getDatanodeUuid() {
return datanodeUuid;
}
private String checkDatanodeUuid(String uuid) {
if (uuid == null || uuid.isEmpty()) {
return null;
} else {
return uuid;
}
}
/**
   * @return ipAddr
*/
public String getIpAddr() {
return ipAddr;
}
/**
* @return hostname
*/
public String getHostName() {
return hostName;
}
/**
* @return hostname from the actual connection
*/
public String getPeerHostName() {
return peerHostName;
}
/**
* @return IP:xferPort string
*/
public String getXferAddr() {
return xferAddr;
}
/**
* @return IP:ipcPort string
*/
private String getIpcAddr() {
return ipAddr + ":" + ipcPort;
}
/**
* @return IP:infoPort string
*/
public String getInfoAddr() {
return ipAddr + ":" + infoPort;
}
/**
   * @return IP:infoSecurePort string
*/
public String getInfoSecureAddr() {
return ipAddr + ":" + infoSecurePort;
}
/**
* @return hostname:xferPort
*/
public String getXferAddrWithHostname() {
return hostName + ":" + xferPort;
}
/**
* @return hostname:ipcPort
*/
private String getIpcAddrWithHostname() {
return hostName + ":" + ipcPort;
}
/**
* @param useHostname true to use the DN hostname, use the IP otherwise
* @return name:xferPort
*/
public String getXferAddr(boolean useHostname) {
return useHostname ? getXferAddrWithHostname() : getXferAddr();
}
/**
* @param useHostname true to use the DN hostname, use the IP otherwise
* @return name:ipcPort
*/
public String getIpcAddr(boolean useHostname) {
return useHostname ? getIpcAddrWithHostname() : getIpcAddr();
}
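  /*
   * Illustrative only: for a DatanodeID built as
   *   new DatanodeID("10.0.0.1", "dn1.example.com", null, 9866, 9864, 9865, 9867)
   * the helpers above yield
   *   getXferAddr()      -> "10.0.0.1:9866"
   *   getXferAddr(true)  -> "dn1.example.com:9866"
   *   getIpcAddr(true)   -> "dn1.example.com:9867"
   * (host name, IP and ports here are made-up example values).
   */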
/**
* @return xferPort (the port for data streaming)
*/
public int getXferPort() {
return xferPort;
}
/**
   * @return infoPort (the port at which the HTTP server is bound)
*/
public int getInfoPort() {
return infoPort;
}
/**
   * @return infoSecurePort (the port at which the HTTPS server is bound)
*/
public int getInfoSecurePort() {
return infoSecurePort;
}
/**
   * @return ipcPort (the port at which the IPC server is bound)
*/
public int getIpcPort() {
return ipcPort;
}
@Override
public boolean equals(Object to) {
if (this == to) {
return true;
}
if (!(to instanceof DatanodeID)) {
return false;
}
return (getXferAddr().equals(((DatanodeID)to).getXferAddr()) &&
datanodeUuid.equals(((DatanodeID)to).getDatanodeUuid()));
}
@Override
public int hashCode() {
return datanodeUuid.hashCode();
}
@Override
public String toString() {
return getXferAddr();
}
/**
* Update fields when a new registration request comes in.
* Note that this does not update storageID.
*/
public void updateRegInfo(DatanodeID nodeReg) {
setIpAndXferPort(nodeReg.getIpAddr(), nodeReg.getXferPort());
hostName = nodeReg.getHostName();
peerHostName = nodeReg.getPeerHostName();
infoPort = nodeReg.getInfoPort();
infoSecurePort = nodeReg.getInfoSecurePort();
ipcPort = nodeReg.getIpcPort();
}
/**
* Compare based on data transfer address.
*
* @param that datanode to compare with
* @return as specified by Comparable
*/
@Override
public int compareTo(DatanodeID that) {
return getXferAddr().compareTo(that.getXferAddr());
}
}
| 7,540 | 25.932143 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.FsPermission;
/**
* HDFS permission subclass used to indicate an ACL is present and/or that the
* underlying file/dir is encrypted. The ACL/encrypted bits are not visible
* directly to users of {@link FsPermission} serialization. This is
* done for backwards compatibility in case any existing clients assume the
* value of FsPermission is in a particular range.
*/
@InterfaceAudience.Private
public class FsPermissionExtension extends FsPermission {
private final static short ACL_BIT = 1 << 12;
private final static short ENCRYPTED_BIT = 1 << 13;
private final boolean aclBit;
private final boolean encryptedBit;
/**
* Constructs a new FsPermissionExtension based on the given FsPermission.
*
   * @param perm FsPermission containing permission bits
   * @param hasAcl true if the file or directory has an ACL
   * @param isEncrypted true if the file or directory is encrypted
   */
public FsPermissionExtension(FsPermission perm, boolean hasAcl,
boolean isEncrypted) {
super(perm.toShort());
aclBit = hasAcl;
encryptedBit = isEncrypted;
}
/**
* Creates a new FsPermissionExtension by calling the base class constructor.
*
* @param perm short containing permission bits
*/
public FsPermissionExtension(short perm) {
super(perm);
aclBit = (perm & ACL_BIT) != 0;
encryptedBit = (perm & ENCRYPTED_BIT) != 0;
}
@Override
public short toExtendedShort() {
return (short)(toShort() |
(aclBit ? ACL_BIT : 0) | (encryptedBit ? ENCRYPTED_BIT : 0));
}
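  /*
   * Illustrative only: a plain 0644 permission with the ACL bit (1 << 12) set
   * decodes as
   *   new FsPermissionExtension((short) (0644 | (1 << 12))).getAclBit()  == true
   *   new FsPermissionExtension((short) 0644).getEncryptedBit()          == false
   * i.e. bits 12 and 13 carry the ACL/encrypted flags on top of the normal
   * permission bits, exactly as toExtendedShort() encodes them above.
   */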
@Override
public boolean getAclBit() {
return aclBit;
}
@Override
public boolean getEncryptedBit() {
return encryptedBit;
}
@Override
public boolean equals(Object o) {
// This intentionally delegates to the base class. This is only overridden
// to suppress a FindBugs warning.
return super.equals(o);
}
@Override
public int hashCode() {
// This intentionally delegates to the base class. This is only overridden
// to suppress a FindBugs warning.
return super.hashCode();
}
}
| 2,899 | 31.222222 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.util.Date;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdfs.DFSUtilClient;
/**
* Describes a path-based cache directive.
*/
@InterfaceStability.Evolving
@InterfaceAudience.Public
public class CacheDirectiveInfo {
/**
* A builder for creating new CacheDirectiveInfo instances.
*/
public static class Builder {
private Long id;
private Path path;
private Short replication;
private String pool;
private Expiration expiration;
/**
* Builds a new CacheDirectiveInfo populated with the set properties.
*
* @return New CacheDirectiveInfo.
*/
public CacheDirectiveInfo build() {
return new CacheDirectiveInfo(id, path, replication, pool, expiration);
}
/**
* Creates an empty builder.
*/
public Builder() {
}
/**
* Creates a builder with all elements set to the same values as the
* given CacheDirectiveInfo.
*/
public Builder(CacheDirectiveInfo directive) {
this.id = directive.getId();
this.path = directive.getPath();
this.replication = directive.getReplication();
this.pool = directive.getPool();
this.expiration = directive.getExpiration();
}
/**
* Sets the id used in this request.
*
* @param id The id used in this request.
* @return This builder, for call chaining.
*/
public Builder setId(Long id) {
this.id = id;
return this;
}
/**
* Sets the path used in this request.
*
* @param path The path used in this request.
* @return This builder, for call chaining.
*/
public Builder setPath(Path path) {
this.path = path;
return this;
}
/**
* Sets the replication used in this request.
*
* @param replication The replication used in this request.
* @return This builder, for call chaining.
*/
public Builder setReplication(Short replication) {
this.replication = replication;
return this;
}
/**
* Sets the pool used in this request.
*
* @param pool The pool used in this request.
* @return This builder, for call chaining.
*/
public Builder setPool(String pool) {
this.pool = pool;
return this;
}
/**
* Sets when the CacheDirective should expire. A
* {@link CacheDirectiveInfo.Expiration} can specify either an absolute or
* relative expiration time.
*
* @param expiration when this CacheDirective should expire
* @return This builder, for call chaining
*/
public Builder setExpiration(Expiration expiration) {
this.expiration = expiration;
return this;
}
}
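  /*
   * Typical construction (a sketch; the path, pool and duration are example
   * values, not defaults):
   *
   *   CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
   *       .setPath(new Path("/warm/dataset"))
   *       .setReplication((short) 2)
   *       .setPool("analytics")
   *       .setExpiration(CacheDirectiveInfo.Expiration.newRelative(
   *           36L * 60 * 60 * 1000))        // expire 36 hours from now
   *       .build();
   */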
/**
* Denotes a relative or absolute expiration time for a CacheDirective. Use
* factory methods {@link CacheDirectiveInfo.Expiration#newAbsolute(Date)} and
* {@link CacheDirectiveInfo.Expiration#newRelative(long)} to create an
* Expiration.
* <p>
* In either case, the server-side clock is used to determine when a
* CacheDirective expires.
*/
public static class Expiration {
/**
* The maximum value we accept for a relative expiry.
*/
public static final long MAX_RELATIVE_EXPIRY_MS =
Long.MAX_VALUE / 4; // This helps prevent weird overflow bugs
/**
     * A relative Expiration that never expires.
*/
public static final Expiration NEVER = newRelative(MAX_RELATIVE_EXPIRY_MS);
/**
* Create a new relative Expiration.
* <p>
* Use {@link Expiration#NEVER} to indicate an Expiration that never
* expires.
*
* @param ms how long until the CacheDirective expires, in milliseconds
* @return A relative Expiration
*/
public static Expiration newRelative(long ms) {
return new Expiration(ms, true);
}
/**
* Create a new absolute Expiration.
* <p>
* Use {@link Expiration#NEVER} to indicate an Expiration that never
* expires.
*
* @param date when the CacheDirective expires
* @return An absolute Expiration
*/
public static Expiration newAbsolute(Date date) {
return new Expiration(date.getTime(), false);
}
/**
* Create a new absolute Expiration.
* <p>
* Use {@link Expiration#NEVER} to indicate an Expiration that never
* expires.
*
* @param ms when the CacheDirective expires, in milliseconds since the Unix
* epoch.
* @return An absolute Expiration
*/
public static Expiration newAbsolute(long ms) {
return new Expiration(ms, false);
}
private final long ms;
private final boolean isRelative;
private Expiration(long ms, boolean isRelative) {
if (isRelative) {
Preconditions.checkArgument(ms <= MAX_RELATIVE_EXPIRY_MS,
"Expiration time is too far in the future!");
}
this.ms = ms;
this.isRelative = isRelative;
}
/**
* @return true if Expiration was specified as a relative duration, false if
* specified as an absolute time.
*/
public boolean isRelative() {
return isRelative;
}
/**
* @return The raw underlying millisecond value, either a relative duration
* or an absolute time as milliseconds since the Unix epoch.
*/
public long getMillis() {
return ms;
}
/**
* @return Expiration time as a {@link Date} object. This converts a
* relative Expiration into an absolute Date based on the local
* clock.
*/
public Date getAbsoluteDate() {
return new Date(getAbsoluteMillis());
}
/**
* @return Expiration time in milliseconds from the Unix epoch. This
* converts a relative Expiration into an absolute time based on the
* local clock.
*/
public long getAbsoluteMillis() {
if (!isRelative) {
return ms;
} else {
return new Date().getTime() + ms;
}
}
@Override
public String toString() {
if (isRelative) {
return DFSUtilClient.durationToString(ms);
}
return DFSUtilClient.dateToIso8601String(new Date(ms));
}
}
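  /*
   * Sketch of the two factory styles (the duration and date are example
   * values):
   *
   *   Expiration in1h  = Expiration.newRelative(60L * 60 * 1000);
   *   Expiration fixed = Expiration.newAbsolute(new Date(1735689600000L));
   *
   * Both are interpreted against the server-side clock when the directive is
   * evaluated; getAbsoluteMillis() on a relative Expiration adds the duration
   * to the local clock, as noted above.
   */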
private final Long id;
private final Path path;
private final Short replication;
private final String pool;
private final Expiration expiration;
CacheDirectiveInfo(Long id, Path path, Short replication, String pool,
Expiration expiration) {
this.id = id;
this.path = path;
this.replication = replication;
this.pool = pool;
this.expiration = expiration;
}
/**
* @return The ID of this directive.
*/
public Long getId() {
return id;
}
/**
* @return The path used in this request.
*/
public Path getPath() {
return path;
}
/**
* @return The number of times the block should be cached.
*/
public Short getReplication() {
return replication;
}
/**
* @return The pool used in this request.
*/
public String getPool() {
return pool;
}
/**
* @return When this directive expires.
*/
public Expiration getExpiration() {
return expiration;
}
@Override
public boolean equals(Object o) {
if (o == null) {
return false;
}
if (getClass() != o.getClass()) {
return false;
}
CacheDirectiveInfo other = (CacheDirectiveInfo)o;
return new EqualsBuilder().append(getId(), other.getId()).
append(getPath(), other.getPath()).
append(getReplication(), other.getReplication()).
append(getPool(), other.getPool()).
append(getExpiration(), other.getExpiration()).
isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder().append(id).
append(path).
append(replication).
append(pool).
append(expiration).
hashCode();
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("{");
String prefix = "";
if (id != null) {
builder.append(prefix).append("id: ").append(id);
prefix = ", ";
}
if (path != null) {
builder.append(prefix).append("path: ").append(path);
prefix = ", ";
}
if (replication != null) {
builder.append(prefix).append("replication: ").append(replication);
prefix = ", ";
}
if (pool != null) {
builder.append(prefix).append("pool: ").append(pool);
prefix = ", ";
}
if (expiration != null) {
builder.append(prefix).append("expiration: ").append(expiration);
prefix = ", ";
}
builder.append("}");
return builder.toString();
}
};
| 9,790 | 26.272981 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.LinkedList;
import java.util.List;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.StorageType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A block storage policy describes how to select the storage types
* for the replicas of a block.
*/
@InterfaceAudience.Private
public class BlockStoragePolicy implements BlockStoragePolicySpi {
public static final Logger LOG = LoggerFactory.getLogger(BlockStoragePolicy
.class);
/** A 4-bit policy ID */
private final byte id;
/** Policy name */
private final String name;
/** The storage types to store the replicas of a new block. */
private final StorageType[] storageTypes;
/** The fallback storage type for block creation. */
private final StorageType[] creationFallbacks;
/** The fallback storage type for replication. */
private final StorageType[] replicationFallbacks;
/**
* Whether the policy is inherited during file creation.
* If set then the policy cannot be changed after file creation.
*/
private boolean copyOnCreateFile;
@VisibleForTesting
public BlockStoragePolicy(byte id, String name, StorageType[] storageTypes,
StorageType[] creationFallbacks, StorageType[] replicationFallbacks) {
this(id, name, storageTypes, creationFallbacks, replicationFallbacks,
false);
}
@VisibleForTesting
public BlockStoragePolicy(byte id, String name, StorageType[] storageTypes,
StorageType[] creationFallbacks, StorageType[] replicationFallbacks,
boolean copyOnCreateFile) {
this.id = id;
this.name = name;
this.storageTypes = storageTypes;
this.creationFallbacks = creationFallbacks;
this.replicationFallbacks = replicationFallbacks;
this.copyOnCreateFile = copyOnCreateFile;
}
/**
* @return a list of {@link StorageType}s for storing the replicas of a block.
*/
public List<StorageType> chooseStorageTypes(final short replication) {
final List<StorageType> types = new LinkedList<StorageType>();
int i = 0, j = 0;
// Do not return transient storage types. We will not have accurate
// usage information for transient types.
for (;i < replication && j < storageTypes.length; ++j) {
if (!storageTypes[j].isTransient()) {
types.add(storageTypes[j]);
++i;
}
}
final StorageType last = storageTypes[storageTypes.length - 1];
if (!last.isTransient()) {
for (; i < replication; i++) {
types.add(last);
}
}
return types;
}
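  // Illustrative example (not from the original source): with
  // storageTypes = [DISK, ARCHIVE] and replication = 3, the method above
  // returns [DISK, ARCHIVE, ARCHIVE]; the last non-transient type is repeated
  // to pad the list up to the replication factor.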
/**
* Choose the storage types for storing the remaining replicas, given the
* replication number and the storage types of the chosen replicas.
*
* @param replication the replication number.
* @param chosen the storage types of the chosen replicas.
* @return a list of {@link StorageType}s for storing the replicas of a block.
*/
public List<StorageType> chooseStorageTypes(final short replication,
final Iterable<StorageType> chosen) {
return chooseStorageTypes(replication, chosen, null);
}
private List<StorageType> chooseStorageTypes(final short replication,
final Iterable<StorageType> chosen, final List<StorageType> excess) {
final List<StorageType> types = chooseStorageTypes(replication);
diff(types, chosen, excess);
return types;
}
/**
* Choose the storage types for storing the remaining replicas, given the
* replication number, the storage types of the chosen replicas and
* the unavailable storage types. It uses fallback storage in case that
* the desired storage type is unavailable.
*
* @param replication the replication number.
* @param chosen the storage types of the chosen replicas.
* @param unavailables the unavailable storage types.
* @param isNewBlock Is it for new block creation?
* @return a list of {@link StorageType}s for storing the replicas of a block.
*/
public List<StorageType> chooseStorageTypes(final short replication,
final Iterable<StorageType> chosen,
final EnumSet<StorageType> unavailables,
final boolean isNewBlock) {
final List<StorageType> excess = new LinkedList<StorageType>();
final List<StorageType> storageTypes = chooseStorageTypes(
replication, chosen, excess);
final int expectedSize = storageTypes.size() - excess.size();
final List<StorageType> removed = new LinkedList<StorageType>();
for(int i = storageTypes.size() - 1; i >= 0; i--) {
// replace/remove unavailable storage types.
final StorageType t = storageTypes.get(i);
if (unavailables.contains(t)) {
final StorageType fallback = isNewBlock?
getCreationFallback(unavailables)
: getReplicationFallback(unavailables);
if (fallback == null) {
removed.add(storageTypes.remove(i));
} else {
storageTypes.set(i, fallback);
}
}
}
// remove excess storage types after fallback replacement.
diff(storageTypes, excess, null);
if (storageTypes.size() < expectedSize) {
LOG.warn("Failed to place enough replicas: expected size is {}"
+ " but only {} storage types can be selected (replication={},"
+ " selected={}, unavailable={}" + ", removed={}" + ", policy={}"
+ ")", expectedSize, storageTypes.size(), replication, storageTypes,
unavailables, removed, this);
}
return storageTypes;
}
/**
* Compute the difference between two lists t and c so that after the diff
* computation we have: t = t - c;
* Further, if e is not null, set e = e + c - t;
*/
private static void diff(List<StorageType> t, Iterable<StorageType> c,
List<StorageType> e) {
for(StorageType storagetype : c) {
final int i = t.indexOf(storagetype);
if (i >= 0) {
t.remove(i);
} else if (e != null) {
e.add(storagetype);
}
}
}
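  // Illustrative example (not from the original source): with
  // t = [DISK, DISK, ARCHIVE] and c = [DISK, SSD], diff() leaves
  // t = [DISK, ARCHIVE] and, if e is non-null, adds SSD to e.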
/**
* Choose excess storage types for deletion, given the
* replication number and the storage types of the chosen replicas.
*
* @param replication the replication number.
* @param chosen the storage types of the chosen replicas.
* @return a list of {@link StorageType}s for deletion.
*/
public List<StorageType> chooseExcess(final short replication,
final Iterable<StorageType> chosen) {
final List<StorageType> types = chooseStorageTypes(replication);
final List<StorageType> excess = new LinkedList<StorageType>();
diff(types, chosen, excess);
return excess;
}
/** @return the fallback {@link StorageType} for creation. */
public StorageType getCreationFallback(EnumSet<StorageType> unavailables) {
return getFallback(unavailables, creationFallbacks);
}
/** @return the fallback {@link StorageType} for replication. */
public StorageType getReplicationFallback(EnumSet<StorageType> unavailables) {
return getFallback(unavailables, replicationFallbacks);
}
@Override
public int hashCode() {
return Byte.valueOf(id).hashCode();
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
} else if (obj == null || !(obj instanceof BlockStoragePolicy)) {
return false;
}
final BlockStoragePolicy that = (BlockStoragePolicy)obj;
return this.id == that.id;
}
@Override
public String toString() {
return getClass().getSimpleName() + "{" + name + ":" + id
+ ", storageTypes=" + Arrays.asList(storageTypes)
+ ", creationFallbacks=" + Arrays.asList(creationFallbacks)
+ ", replicationFallbacks=" + Arrays.asList(replicationFallbacks) + "}";
}
public byte getId() {
return id;
}
@Override
public String getName() {
return name;
}
@Override
public StorageType[] getStorageTypes() {
return this.storageTypes;
}
@Override
public StorageType[] getCreationFallbacks() {
return this.creationFallbacks;
}
@Override
public StorageType[] getReplicationFallbacks() {
return this.replicationFallbacks;
}
private static StorageType getFallback(EnumSet<StorageType> unavailables,
StorageType[] fallbacks) {
for(StorageType fb : fallbacks) {
if (!unavailables.contains(fb)) {
return fb;
}
}
return null;
}
@Override
public boolean isCopyOnCreateFile() {
return copyOnCreateFile;
}
}
| 9,395 | 33.043478 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import org.apache.hadoop.fs.StorageType;
import java.util.UUID;
/**
 * Class that captures information about a storage on a Datanode.
*/
public class DatanodeStorage {
/** The state of the storage. */
public enum State {
NORMAL,
/**
* A storage that represents a read-only path to replicas stored on a shared storage device.
* Replicas on {@link #READ_ONLY_SHARED} storage are not counted towards live replicas.
*
* <p>
* In certain implementations, a {@link #READ_ONLY_SHARED} storage may be correlated to
* its {@link #NORMAL} counterpart using the {@link DatanodeStorage#storageID}. This
* property should be used for debugging purposes only.
* </p>
*/
READ_ONLY_SHARED,
FAILED;
}
private final String storageID;
private final State state;
private final StorageType storageType;
private static final String STORAGE_ID_PREFIX = "DS-";
/**
* Create a storage with {@link State#NORMAL} and {@link StorageType#DEFAULT}.
*/
public DatanodeStorage(String storageID) {
this(storageID, State.NORMAL, StorageType.DEFAULT);
}
public DatanodeStorage(String sid, State s, StorageType sm) {
this.storageID = sid;
this.state = s;
this.storageType = sm;
}
public String getStorageID() {
return storageID;
}
public State getState() {
return state;
}
public StorageType getStorageType() {
return storageType;
}
/**
* Generate new storage ID. The format of this string can be changed
* in the future without requiring that old storage IDs be updated.
*
* @return unique storage ID
*/
public static String generateUuid() {
return STORAGE_ID_PREFIX + UUID.randomUUID();
}
/**
* Verify that a given string is a storage ID in the "DS-..uuid.." format.
*/
public static boolean isValidStorageId(final String storageID) {
try {
// Attempt to parse the UUID.
if (storageID != null && storageID.indexOf(STORAGE_ID_PREFIX) == 0) {
UUID.fromString(storageID.substring(STORAGE_ID_PREFIX.length()));
return true;
}
} catch (IllegalArgumentException iae) {
}
return false;
}
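  // Illustrative example (not from the original source): a generated ID looks
  // like "DS-123e4567-e89b-12d3-a456-426614174000", which isValidStorageId()
  // accepts because the "DS-" prefix is followed by a parseable UUID.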
@Override
public String toString() {
return "DatanodeStorage["+ storageID + "," + storageType + "," + state +"]";
}
@Override
public boolean equals(Object other){
if (other == this) {
return true;
}
if ((other == null) ||
!(other instanceof DatanodeStorage)) {
return false;
}
DatanodeStorage otherStorage = (DatanodeStorage) other;
return otherStorage.getStorageID().compareTo(getStorageID()) == 0;
}
@Override
public int hashCode() {
return getStorageID().hashCode();
}
}
| 3,585 | 27.23622 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
/**
* Utilization report for a Datanode storage
*/
public class StorageReport {
private final DatanodeStorage storage;
private final boolean failed;
private final long capacity;
private final long dfsUsed;
private final long remaining;
private final long blockPoolUsed;
public static final StorageReport[] EMPTY_ARRAY = {};
public StorageReport(DatanodeStorage storage, boolean failed,
long capacity, long dfsUsed, long remaining, long bpUsed) {
this.storage = storage;
this.failed = failed;
this.capacity = capacity;
this.dfsUsed = dfsUsed;
this.remaining = remaining;
this.blockPoolUsed = bpUsed;
}
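  // Illustrative usage (a sketch, not part of the original class): a report
  // for a healthy storage might be built as
  //   new StorageReport(storage, false, capacity, dfsUsed, remaining, bpUsed);
  // where the long arguments carry the space accounting for that storage.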
public DatanodeStorage getStorage() {
return storage;
}
public boolean isFailed() {
return failed;
}
public long getCapacity() {
return capacity;
}
public long getDfsUsed() {
return dfsUsed;
}
public long getRemaining() {
return remaining;
}
public long getBlockPoolUsed() {
return blockPoolUsed;
}
}
| 1,866 | 26.865672 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
/**
 * Class that captures information about a datanode and its storages.
*/
public class DatanodeStorageReport {
final DatanodeInfo datanodeInfo;
final StorageReport[] storageReports;
public DatanodeStorageReport(DatanodeInfo datanodeInfo,
StorageReport[] storageReports) {
this.datanodeInfo = datanodeInfo;
this.storageReports = storageReports;
}
public DatanodeInfo getDatanodeInfo() {
return datanodeInfo;
}
public StorageReport[] getStorageReports() {
return storageReports;
}
}
| 1,427 | 33 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This exception is thrown when the name node is in safe mode.
 * Clients cannot modify the namespace until safe mode is off.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class SafeModeException extends IOException {
private static final long serialVersionUID = 1L;
public SafeModeException(String msg) {
super(msg);
}
}
| 1,369 | 35.052632 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/NotReplicatedYetException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* The file has not finished being written to enough datanodes yet.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class NotReplicatedYetException extends IOException {
private static final long serialVersionUID = 1L;
public NotReplicatedYetException(String msg) {
super(msg);
}
}
| 1,322 | 33.815789 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fs/TestFiRename.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.File;
import java.io.IOException;
import java.util.EnumSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.test.PathUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import static org.apache.hadoop.fs.FileContextTestHelper.*;
/**
 * Renames src to dst. Rename is done using the following steps:
* <ul>
* <li>Checks are made to ensure src exists and appropriate flags are being
* passed to overwrite existing destination.
* <li>src is removed.
* <li>dst if it exists is removed.
* <li>src is renamed and added to directory tree as dst.
* </ul>
*
 * If a failure occurs during any of the above steps, the state of src and dst
 * is reverted to what it was prior to the rename. This test ensures that the
 * state is correctly reverted.
*
* This test uses AspectJ to simulate failures.
*/
public class TestFiRename {
private static final Log LOG = LogFactory.getLog(TestFiRename.class);
private static String removeChild = "";
private static String addChild = "";
private static byte[] data = { 0 };
private static String TEST_ROOT_DIR = PathUtils.getTestDirName(TestFiRename.class);
private static Configuration CONF = new Configuration();
static {
CONF.setInt("io.bytes.per.checksum", 1);
}
private MiniDFSCluster cluster = null;
private FileContext fc = null;
@Before
public void setup() throws IOException {
restartCluster(true);
}
@After
public void teardown() throws IOException {
if (fc != null) {
fc.delete(getTestRootPath(), true);
}
if (cluster != null) {
cluster.shutdown();
}
}
private void restartCluster(boolean format) throws IOException {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
cluster = new MiniDFSCluster.Builder(CONF).format(format).build();
cluster.waitClusterUp();
fc = FileContext.getFileContext(cluster.getURI(0), CONF);
}
/**
* Returns true to indicate an exception should be thrown to simulate failure
* during removal of a node from directory tree.
*/
public static boolean throwExceptionOnRemove(String child) {
boolean status = removeChild.endsWith(child);
if (status) {
removeChild = "";
}
return status;
}
/**
* Returns true to indicate an exception should be thrown to simulate failure
* during addition of a node to directory tree.
*/
public static boolean throwExceptionOnAdd(String child) {
boolean status = addChild.endsWith(child);
if (status) {
addChild = "";
}
return status;
}
/** Set child name on removal of which failure should be simulated */
public static void exceptionOnRemove(String child) {
removeChild = child;
addChild = "";
}
/** Set child name on addition of which failure should be simulated */
public static void exceptionOnAdd(String child) {
removeChild = "";
addChild = child;
}
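  // Descriptive note (added): the AspectJ advice woven into the rename path is
  // expected to consult throwExceptionOnRemove()/throwExceptionOnAdd() above,
  // so a test arms a single simulated failure by calling exceptionOnRemove() or
  // exceptionOnAdd() before invoking rename().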
private Path getTestRootPath() {
return fc.makeQualified(new Path(TEST_ROOT_DIR));
}
private Path getTestPath(String pathString) {
return fc.makeQualified(new Path(TEST_ROOT_DIR, pathString));
}
private void createFile(Path path) throws IOException {
FSDataOutputStream out = fc.create(path, EnumSet.of(CreateFlag.CREATE),
Options.CreateOpts.createParent());
out.write(data, 0, data.length);
out.close();
}
/** Rename test when src exists and dst does not */
@Test
public void testFailureNonExistentDst() throws Exception {
final Path src = getTestPath("testFailureNonExistenSrc/dir/src");
final Path dst = getTestPath("testFailureNonExistenSrc/newdir/dst");
createFile(src);
// During rename, while removing src, an exception is thrown
TestFiRename.exceptionOnRemove(src.toString());
rename(src, dst, true, true, false, Rename.NONE);
// During rename, while adding dst an exception is thrown
TestFiRename.exceptionOnAdd(dst.toString());
rename(src, dst, true, true, false, Rename.NONE);
}
/** Rename test when src and dst exist */
@Test
public void testFailuresExistingDst() throws Exception {
final Path src = getTestPath("testFailuresExistingDst/dir/src");
final Path dst = getTestPath("testFailuresExistingDst/newdir/dst");
createFile(src);
createFile(dst);
// During rename, while removing src, an exception is thrown
TestFiRename.exceptionOnRemove(src.toString());
rename(src, dst, true, true, true, Rename.OVERWRITE);
// During rename, while removing dst, an exception is thrown
TestFiRename.exceptionOnRemove(dst.toString());
rename(src, dst, true, true, true, Rename.OVERWRITE);
// During rename, while adding dst an exception is thrown
TestFiRename.exceptionOnAdd(dst.toString());
rename(src, dst, true, true, true, Rename.OVERWRITE);
}
/** Rename test where both src and dst are files */
@Test
public void testDeletionOfDstFile() throws Exception {
Path src = getTestPath("testDeletionOfDstFile/dir/src");
Path dst = getTestPath("testDeletionOfDstFile/newdir/dst");
createFile(src);
createFile(dst);
final FSNamesystem namesystem = cluster.getNamesystem();
final long blocks = namesystem.getBlocksTotal();
final long fileCount = namesystem.getFilesTotal();
rename(src, dst, false, false, true, Rename.OVERWRITE);
    // After a successful rename, the blocks corresponding to dst are deleted
Assert.assertEquals(blocks - 1, namesystem.getBlocksTotal());
// After successful rename dst file is deleted
Assert.assertEquals(fileCount - 1, namesystem.getFilesTotal());
// Restart the cluster to ensure new rename operation
// recorded in editlog is processed right
restartCluster(false);
int count = 0;
boolean exception = true;
src = getTestPath("testDeletionOfDstFile/dir/src");
dst = getTestPath("testDeletionOfDstFile/newdir/dst");
while (exception && count < 5) {
try {
exists(fc, src);
exception = false;
} catch (Exception e) {
LOG.warn("Exception " + " count " + count + " " + e.getMessage());
Thread.sleep(1000);
count++;
}
}
Assert.assertFalse(exists(fc, src));
Assert.assertTrue(exists(fc, dst));
}
/** Rename test where both src and dst are directories */
@Test
public void testDeletionOfDstDirectory() throws Exception {
Path src = getTestPath("testDeletionOfDstDirectory/dir/src");
Path dst = getTestPath("testDeletionOfDstDirectory/newdir/dst");
fc.mkdir(src, FileContext.DEFAULT_PERM, true);
fc.mkdir(dst, FileContext.DEFAULT_PERM, true);
FSNamesystem namesystem = cluster.getNamesystem();
long fileCount = namesystem.getFilesTotal();
rename(src, dst, false, false, true, Rename.OVERWRITE);
// After successful rename dst directory is deleted
Assert.assertEquals(fileCount - 1, namesystem.getFilesTotal());
// Restart the cluster to ensure new rename operation
// recorded in editlog is processed right
restartCluster(false);
src = getTestPath("testDeletionOfDstDirectory/dir/src");
dst = getTestPath("testDeletionOfDstDirectory/newdir/dst");
int count = 0;
boolean exception = true;
while (exception && count < 5) {
try {
exists(fc, src);
exception = false;
} catch (Exception e) {
LOG.warn("Exception " + " count " + count + " " + e.getMessage());
Thread.sleep(1000);
count++;
}
}
Assert.assertFalse(exists(fc, src));
Assert.assertTrue(exists(fc, dst));
}
private void rename(Path src, Path dst, boolean exception, boolean srcExists,
boolean dstExists, Rename... options) throws IOException {
try {
fc.rename(src, dst, options);
Assert.assertFalse("Expected exception is not thrown", exception);
} catch (Exception e) {
LOG.warn("Exception ", e);
Assert.assertTrue(exception);
}
Assert.assertEquals(srcExists, exists(fc, src));
Assert.assertEquals(dstExists, exists(fc, dst));
}
}
| 9,199 | 32.699634 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fs/TestFiListPath.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.util.StringUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* A large directory listing may have to go through multiple RPCs.
* The directory to be listed may be removed before all contents are listed.
*
* This test uses AspectJ to simulate the scenario.
*/
public class TestFiListPath {
private static final Log LOG = LogFactory.getLog(TestFiListPath.class);
private static final int LIST_LIMIT = 1;
private static MiniDFSCluster cluster = null;
private static FileSystem fs;
private static Path TEST_PATH = new Path("/tmp");
@BeforeClass
public static void setup() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, LIST_LIMIT);
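    // Descriptive note (added): with DFS_LIST_LIMIT set to 1, each getListing
    // RPC returns a single entry, so listing the LIST_LIMIT+1 directories
    // created in prepare() must span multiple RPCs -- the window into which
    // the simulated deletion is injected.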
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitClusterUp();
fs = cluster.getFileSystem();
}
@AfterClass
public static void teardown() throws IOException {
if (cluster != null) {
cluster.shutdown();
}
}
@Before
public void prepare() throws IOException {
fs.mkdirs(TEST_PATH);
for (int i=0; i<LIST_LIMIT+1; i++) {
fs.mkdirs(new Path(TEST_PATH, "dir"+i));
}
}
@After
public void cleanup() throws IOException {
fs.delete(TEST_PATH, true);
}
/** Remove the target directory after the getListing RPC */
@Test
public void testTargetDeletionForListStatus() throws Exception {
LOG.info("Test Target Delete For listStatus");
try {
fs.listStatus(TEST_PATH);
fail("Test should fail with FileNotFoundException");
} catch (FileNotFoundException e) {
assertEquals("File " + TEST_PATH + " does not exist.", e.getMessage());
LOG.info(StringUtils.stringifyException(e));
}
}
/** Remove the target directory after the getListing RPC */
@Test
public void testTargetDeletionForListLocatedStatus() throws Exception {
LOG.info("Test Target Delete For listLocatedStatus");
RemoteIterator<LocatedFileStatus> itor = fs.listLocatedStatus(TEST_PATH);
itor.next();
assertFalse (itor.hasNext());
}
}
| 3,506 | 32.084906 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fi/FiTestUtil.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fi;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/** Test Utilities */
public class FiTestUtil {
/** Logging */
public static final Log LOG = LogFactory.getLog(FiTestUtil.class);
/** Random source */
public static final ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
protected Random initialValue() {
final Random r = new Random();
final long seed = r.nextLong();
LOG.info(Thread.currentThread() + ": seed=" + seed);
r.setSeed(seed);
return r;
}
};
/**
* Return a random integer uniformly distributed over the interval [min,max).
*/
public static int nextRandomInt(final int min, final int max) {
final int d = max - min;
if (d <= 0) {
throw new IllegalArgumentException("d <= 0, min=" + min + ", max=" + max);
}
return d == 1? min: min + RANDOM.get().nextInt(d);
}
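  // Illustrative example (not from the original source): nextRandomInt(3, 8)
  // returns a value in {3, 4, 5, 6, 7}, while nextRandomInt(5, 6) always
  // returns 5.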
/**
* Return a random integer, with type long,
* uniformly distributed over the interval [min,max).
* Assume max - min <= Integer.MAX_VALUE.
*/
public static long nextRandomLong(final long min, final long max) {
final long d = max - min;
if (d <= 0 || d > Integer.MAX_VALUE) {
throw new IllegalArgumentException(
"d <= 0 || d > Integer.MAX_VALUE, min=" + min + ", max=" + max);
}
return d == 1? min: min + RANDOM.get().nextInt((int)d);
}
  /** Return the method name of the caller. */
public static String getMethodName() {
final StackTraceElement[] s = Thread.currentThread().getStackTrace();
return s[s.length > 2? 2: s.length - 1].getMethodName();
}
/**
* Sleep.
* @return true if sleep exits normally; false if InterruptedException.
*/
public static boolean sleep(long ms) {
LOG.info("Sleep " + ms + " ms");
try {
Thread.sleep(ms);
} catch (InterruptedException e) {
LOG.info("Sleep is interrupted", e);
return false;
}
return true;
}
/**
* Sleep a random number of milliseconds over the interval [min, max).
* If there is an InterruptedException, re-throw it as a RuntimeException.
*/
public static void sleep(final long min, final long max) {
final long n = nextRandomLong(min, max);
LOG.info(Thread.currentThread().getName() + " sleeps for " + n +"ms");
if (n > 0) {
sleep(n);
}
}
/** Action interface */
public static interface Action<T, E extends Exception> {
/** Run the action with the parameter. */
public void run(T parameter) throws E;
}
/** An ActionContainer contains at most one action. */
public static class ActionContainer<T, E extends Exception> {
private List<Action<T, E>> actionList = new ArrayList<Action<T, E>>();
/** Create an empty container. */
public ActionContainer() {}
/** Set action. */
public void set(Action<T, E> a) {actionList.add(a);}
/** Run the action if it exists. */
public void run(T obj) throws E {
for (Action<T, E> action : actionList) {
action.run(obj);
}
}
}
/** Constraint interface */
public static interface Constraint {
/** Is this constraint satisfied? */
public boolean isSatisfied();
}
/** Counting down, the constraint is satisfied if the count is one. */
public static class CountdownConstraint implements Constraint {
private int count;
/** Initialize the count. */
public CountdownConstraint(int count) {
if (count < 1) {
throw new IllegalArgumentException(count + " = count < 1");
}
this.count = count;
}
    /** Counting down, the constraint is satisfied once the count reaches one. */
public boolean isSatisfied() {
if (count > 1) {
count--;
return false;
}
return true;
}
}
/** An action is fired if all the constraints are satisfied. */
public static class ConstraintSatisfactionAction<T, E extends Exception>
implements Action<T, E> {
private final Action<T, E> action;
private final Constraint[] constraints;
/** Constructor */
public ConstraintSatisfactionAction(
Action<T, E> action, Constraint... constraints) {
this.action = action;
this.constraints = constraints;
}
/**
* Fire the action if all the constraints are satisfied.
* Short-circuit-and is used.
*/
@Override
public final void run(T parameter) throws E {
for(Constraint c : constraints) {
if (!c.isSatisfied()) {
return;
}
}
//all constraints are satisfied, fire the action
action.run(parameter);
}
}
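  // Illustrative usage (a sketch; "someAction" stands for any Action
  // implementation): wrapping an action with a CountdownConstraint makes it a
  // no-op until the n-th invocation, e.g.
  //   new ConstraintSatisfactionAction<DatanodeID, IOException>(
  //       someAction, new CountdownConstraint(3));
  // DataTransferTestUtil.createCountdownOomAction() builds exactly this shape.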
/** A MarkerConstraint is satisfied if it is marked. */
public static class MarkerConstraint implements Constraint {
private final String name;
private boolean marked = false;
/** Construct an object. */
public MarkerConstraint(String name) {
this.name = name;
}
/** Set marker to be marked. */
public void mark() {
marked = true;
LOG.info("Marking this " + this);
}
/** Is the marker marked? */
@Override
public boolean isSatisfied() {
return marked;
}
/** {@inheritDoc} */
public String toString() {
return getClass().getSimpleName() + "[" + name + ": " + marked + "]";
}
}
}
| 6,201 | 28.674641 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fi/Pipeline.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fi;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import java.util.List;
import java.util.ArrayList;
public class Pipeline {
private final List<String> datanodes = new ArrayList<String>();
Pipeline(LocatedBlock lb) {
for(DatanodeInfo d : lb.getLocations()) {
datanodes.add(d.getName());
}
}
  /** Does the pipeline contain d? */
public boolean contains(DatanodeID d) {
return datanodes.contains(d.getName());
}
  /** Does the pipeline contain d at the n-th position? */
public boolean contains(int n, DatanodeID d) {
return d.getName().equals(datanodes.get(n));
}
@Override
public String toString() {
return getClass().getSimpleName() + datanodes;
}
}
| 1,650 | 31.372549 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fi/FiConfig.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fi;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
/**
 * This class wraps the logic around the fault injection configuration file.
 * The default file is expected to be found in src/test/fi-site.xml.
 * This default file should be copied by JUnit Ant's tasks to the
 * build/test/extraconf folder before tests are run.
* An alternative location can be set through
* -Dfi.config=<file_name>
*/
public class FiConfig {
private static final String CONFIG_PARAMETER = ProbabilityModel.FPROB_NAME + "config";
private static final String DEFAULT_CONFIG = "fi-site.xml";
private static Configuration conf;
static {
init();
}
protected static void init () {
if (conf == null) {
conf = new HdfsConfiguration(false);
String configName = System.getProperty(CONFIG_PARAMETER, DEFAULT_CONFIG);
conf.addResource(configName);
}
}
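  // Illustrative override (not from the original source): running tests with
  //   -Dfi.config=my-fi-site.xml
  // makes init() load that resource instead of the default fi-site.xml
  // ("my-fi-site.xml" is a hypothetical file name).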
/**
* Method provides access to local Configuration
*
* @return Configuration initialized with fault injection's parameters
*/
public static Configuration getConfig() {
return conf;
}
}
| 1,948 | 33.803571 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fi/PipelineTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fi;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
/** A pipeline contains a list of datanodes. */
public interface PipelineTest {
public Pipeline initPipeline(LocatedBlock lb);
public Pipeline getPipelineForDatanode(DatanodeID id);
}
| 1,130 | 39.392857 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fi/ProbabilityModel.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fi;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
/**
* This class is responsible for the decision of when a fault
* has to be triggered within a class of Hadoop
*
* Default probability of injection is set to 0%. To change it
 * one can set the system property -Dfi.*=<new probability level>
 * Another way to do so is to set this level through the FI config file,
 * located under src/test/fi-site.xml
 *
 * To change the level one has to specify the following system property:
 * -Dfi.<name of fault location>=<probability level> at runtime
 * Probability level is specified by a float between 0.0 and 1.0
 *
 * <name of fault location> might be represented by a short classname
 * or otherwise. This decision is left up to the discretion of the aspect
 * developer, but has to be consistent throughout the code
*/
public class ProbabilityModel {
private static Random generator = new Random();
private static final Log LOG = LogFactory.getLog(ProbabilityModel.class);
static final String FPROB_NAME = "fi.";
private static final String ALL_PROBABILITIES = FPROB_NAME + "*";
private static final float DEFAULT_PROB = 0.00f; //Default probability is 0%
private static final float MAX_PROB = 1.00f; // Max probability is 100%
private static Configuration conf = FiConfig.getConfig();
static {
// Set new default probability if specified through a system.property
// If neither is specified set default probability to DEFAULT_PROB
conf.set(ALL_PROBABILITIES,
System.getProperty(ALL_PROBABILITIES,
conf.get(ALL_PROBABILITIES, Float.toString(DEFAULT_PROB))));
LOG.info(ALL_PROBABILITIES + "=" + conf.get(ALL_PROBABILITIES));
}
/**
* Simplistic method to check if we have reached the point of injection
* @param klassName is the name of the probability level to check.
* If a configuration has been set for "fi.myClass" then you can check if the
* inject criteria has been reached by calling this method with "myClass"
* string as its parameter
* @return true if the probability threshold has been reached; false otherwise
*/
public static boolean injectCriteria(String klassName) {
boolean trigger = false;
// TODO fix this: make it more sophisticated!!!
if (generator.nextFloat() < getProbability(klassName)) {
trigger = true;
}
return trigger;
}
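  // Illustrative usage (a sketch; "MyFaultPoint" is a hypothetical location
  // name): an injection aspect typically guards its fault with
  //   if (ProbabilityModel.injectCriteria("MyFaultPoint")) { /* inject */ }
  // and the firing rate is then tuned at runtime via -Dfi.MyFaultPoint=0.25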
/**
   * This primitive checks for an arbitrarily set desired probability. If the
   * level hasn't been set, the method will return the default setting.
   * The probability is expected to be set as a float between 0.0 and 1.0
   * @param klass is the name of the resource
   * @return float representation of the configured probability level of
   *  the requested resource, or the default value if it hasn't been set
*/
protected static float getProbability(final String klass) {
String newProbName = FPROB_NAME + klass;
String newValue = System.getProperty(newProbName, conf.get(ALL_PROBABILITIES));
if (newValue != null && !newValue.equals(conf.get(newProbName)))
conf.set(newProbName, newValue);
float ret = conf.getFloat(newProbName,
conf.getFloat(ALL_PROBABILITIES, DEFAULT_PROB));
if(LOG.isDebugEnabled()) {
LOG.debug("Request for " + newProbName + " returns=" + ret);
}
// Make sure that probability level is valid.
if (ret < DEFAULT_PROB || ret > MAX_PROB)
ret = conf.getFloat(ALL_PROBABILITIES, DEFAULT_PROB);
return ret;
}
}
| 4,409 | 39.833333 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fi;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fi.FiTestUtil.Action;
import org.apache.hadoop.fi.FiTestUtil.ActionContainer;
import org.apache.hadoop.fi.FiTestUtil.ConstraintSatisfactionAction;
import org.apache.hadoop.fi.FiTestUtil.CountdownConstraint;
import org.apache.hadoop.fi.FiTestUtil.MarkerConstraint;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
/**
* Utilities for DataTransferProtocol related tests,
* e.g. TestFiDataTransferProtocol.
*/
public class DataTransferTestUtil {
protected static PipelineTest thepipelinetest;
/** initialize pipeline test */
public static PipelineTest initTest() {
return thepipelinetest = new DataTransferTest();
}
/** get the pipeline test object */
public static PipelineTest getPipelineTest() {
return thepipelinetest;
}
/** get the pipeline test object cast to DataTransferTest */
public static DataTransferTest getDataTransferTest() {
return (DataTransferTest)getPipelineTest();
}
/**
* The DataTransferTest class includes a pipeline
* and some actions.
*/
public static class DataTransferTest implements PipelineTest {
private final List<Pipeline> pipelines = new ArrayList<Pipeline>();
private volatile boolean isSuccess = false;
/** Simulate action for the receiverOpWriteBlock pointcut */
public final ActionContainer<DatanodeID, IOException> fiReceiverOpWriteBlock
= new ActionContainer<DatanodeID, IOException>();
/** Simulate action for the callReceivePacket pointcut */
public final ActionContainer<DatanodeID, IOException> fiCallReceivePacket
= new ActionContainer<DatanodeID, IOException>();
/** Simulate action for the callWritePacketToDisk pointcut */
public final ActionContainer<DatanodeID, IOException> fiCallWritePacketToDisk
= new ActionContainer<DatanodeID, IOException>();
/** Simulate action for the statusRead pointcut */
public final ActionContainer<DatanodeID, IOException> fiStatusRead
= new ActionContainer<DatanodeID, IOException>();
/** Simulate action for the afterDownstreamStatusRead pointcut */
public final ActionContainer<DatanodeID, IOException> fiAfterDownstreamStatusRead
= new ActionContainer<DatanodeID, IOException>();
/** Simulate action for the pipelineAck pointcut */
public final ActionContainer<DatanodeID, IOException> fiPipelineAck
= new ActionContainer<DatanodeID, IOException>();
/** Simulate action for the pipelineClose pointcut */
public final ActionContainer<DatanodeID, IOException> fiPipelineClose
= new ActionContainer<DatanodeID, IOException>();
/** Simulate action for the blockFileClose pointcut */
public final ActionContainer<DatanodeID, IOException> fiBlockFileClose
= new ActionContainer<DatanodeID, IOException>();
/** Verification action for the pipelineInitNonAppend pointcut */
public final ActionContainer<Integer, RuntimeException> fiPipelineInitErrorNonAppend
= new ActionContainer<Integer, RuntimeException>();
/** Verification action for the pipelineErrorAfterInit pointcut */
public final ActionContainer<Integer, RuntimeException> fiPipelineErrorAfterInit
= new ActionContainer<Integer, RuntimeException>();
/** Get test status */
public boolean isSuccess() {
return this.isSuccess;
}
/** Set test status */
public void markSuccess() {
this.isSuccess = true;
}
/** Initialize the pipeline. */
@Override
public synchronized Pipeline initPipeline(LocatedBlock lb) {
final Pipeline pl = new Pipeline(lb);
if (pipelines.contains(pl)) {
throw new IllegalStateException("thepipeline != null");
}
pipelines.add(pl);
return pl;
}
/** Return the pipeline for the datanode. */
@Override
public synchronized Pipeline getPipelineForDatanode(DatanodeID id) {
for (Pipeline p : pipelines) {
if (p.contains(id)){
return p;
}
}
FiTestUtil.LOG.info("FI: pipeline not found; id=" + id
+ ", pipelines=" + pipelines);
return null;
}
/**
     * Has the test not yet succeeded
     * and does the last pipeline contain the given datanode?
*/
private synchronized boolean isNotSuccessAndLastPipelineContains(
int index, DatanodeID id) {
if (isSuccess()) {
return false;
}
final int n = pipelines.size();
return n == 0? false: pipelines.get(n-1).contains(index, id);
}
}
/** Action for DataNode */
public static abstract class DataNodeAction implements
Action<DatanodeID, IOException> {
/** The name of the test */
final String currentTest;
/** The index of the datanode */
final int index;
/**
* @param currentTest The name of the test
* @param index The index of the datanode
*/
protected DataNodeAction(String currentTest, int index) {
this.currentTest = currentTest;
this.index = index;
}
/** {@inheritDoc} */
public String toString() {
return getClass().getSimpleName() + ":" + currentTest
+ ", index=" + index;
}
/** return a String with this object and the datanodeID. */
String toString(DatanodeID datanodeID) {
return "FI: " + this + ", datanode="
+ datanodeID.getName();
}
}
/** An action to set a marker if the DatanodeID is matched. */
public static class DatanodeMarkingAction extends DataNodeAction {
private final MarkerConstraint marker;
/** Construct an object. */
public DatanodeMarkingAction(String currentTest, int index,
MarkerConstraint marker) {
super(currentTest, index);
this.marker = marker;
}
/** Set the marker if the DatanodeID is matched. */
@Override
public void run(DatanodeID datanodeid) throws IOException {
final DataTransferTest test = getDataTransferTest();
if (test.isNotSuccessAndLastPipelineContains(index, datanodeid)) {
marker.mark();
}
}
/** {@inheritDoc} */
public String toString() {
return super.toString() + ", " + marker;
}
}
/** Throws OutOfMemoryError. */
public static class OomAction extends DataNodeAction {
/** Create an action for datanode i in the pipeline. */
public OomAction(String currentTest, int i) {
super(currentTest, i);
}
@Override
public void run(DatanodeID id) {
final DataTransferTest test = getDataTransferTest();
if (test.isNotSuccessAndLastPipelineContains(index, id)) {
final String s = toString(id);
FiTestUtil.LOG.info(s);
throw new OutOfMemoryError(s);
}
}
}
/** Throws OutOfMemoryError if the count is zero. */
public static class CountdownOomAction extends OomAction {
private final CountdownConstraint countdown;
/** Create an action for datanode i in the pipeline with count down. */
public CountdownOomAction(String currentTest, int i, int count) {
super(currentTest, i);
countdown = new CountdownConstraint(count);
}
@Override
public void run(DatanodeID id) {
final DataTransferTest test = getDataTransferTest();
if (test.isNotSuccessAndLastPipelineContains(index, id)
&& countdown.isSatisfied()) {
final String s = toString(id);
FiTestUtil.LOG.info(s);
throw new OutOfMemoryError(s);
}
}
}
/** Throws DiskOutOfSpaceException. */
public static class DoosAction extends DataNodeAction {
/** Create an action for datanode i in the pipeline. */
public DoosAction(String currentTest, int i) {
super(currentTest, i);
}
@Override
public void run(DatanodeID id) throws DiskOutOfSpaceException {
final DataTransferTest test = getDataTransferTest();
if (test.isNotSuccessAndLastPipelineContains(index, id)) {
final String s = toString(id);
FiTestUtil.LOG.info(s);
throw new DiskOutOfSpaceException(s);
}
}
}
/** Throws an IOException. */
public static class IoeAction extends DataNodeAction {
private final String error;
/** Create an action for datanode i in the pipeline. */
public IoeAction(String currentTest, int i, String error) {
super(currentTest, i);
this.error = error;
}
@Override
public void run(DatanodeID id) throws IOException {
final DataTransferTest test = getDataTransferTest();
if (test.isNotSuccessAndLastPipelineContains(index, id)) {
final String s = toString(id);
FiTestUtil.LOG.info(s);
throw new IOException(s);
}
}
@Override
public String toString() {
return error + " " + super.toString();
}
}
/** Throws DiskOutOfSpaceException if the count is zero. */
public static class CountdownDoosAction extends DoosAction {
private final CountdownConstraint countdown;
/** Create an action for datanode i in the pipeline with count down. */
public CountdownDoosAction(String currentTest, int i, int count) {
super(currentTest, i);
countdown = new CountdownConstraint(count);
}
@Override
public void run(DatanodeID id) throws DiskOutOfSpaceException {
final DataTransferTest test = getDataTransferTest();
if (test.isNotSuccessAndLastPipelineContains(index, id)
&& countdown.isSatisfied()) {
final String s = toString(id);
FiTestUtil.LOG.info(s);
throw new DiskOutOfSpaceException(s);
}
}
}
/**
   * Sleep for some period of time to slow down the datanode,
   * or sleep forever so that the datanode becomes unresponsive.
*/
public static class SleepAction extends DataNodeAction {
/** In milliseconds;
* must have (0 <= minDuration < maxDuration) or (maxDuration <= 0).
*/
final long minDuration;
/** In milliseconds; maxDuration <= 0 means sleeping forever.*/
final long maxDuration;
/**
* Create an action for datanode i in the pipeline.
* @param duration In milliseconds, duration <= 0 means sleeping forever.
*/
public SleepAction(String currentTest, int i, long duration) {
this(currentTest, i, duration, duration <= 0? duration: duration+1);
}
/**
* Create an action for datanode i in the pipeline.
* @param minDuration minimum sleep time
* @param maxDuration maximum sleep time
*/
public SleepAction(String currentTest, int i,
long minDuration, long maxDuration) {
super(currentTest, i);
if (maxDuration > 0) {
if (minDuration < 0) {
throw new IllegalArgumentException("minDuration = " + minDuration
+ " < 0 but maxDuration = " + maxDuration + " > 0");
}
if (minDuration >= maxDuration) {
throw new IllegalArgumentException(
minDuration + " = minDuration >= maxDuration = " + maxDuration);
}
}
this.minDuration = minDuration;
this.maxDuration = maxDuration;
}
@Override
public void run(DatanodeID id) {
final DataTransferTest test = getDataTransferTest();
if (test.isNotSuccessAndLastPipelineContains(index, id)) {
FiTestUtil.LOG.info(toString(id));
if (maxDuration <= 0) {
for(; FiTestUtil.sleep(1000); ); //sleep forever until interrupt
} else {
FiTestUtil.sleep(minDuration, maxDuration);
}
}
}
/** {@inheritDoc} */
@Override
public String toString() {
return super.toString() + ", duration="
+ (maxDuration <= 0? "infinity": "[" + minDuration + ", " + maxDuration + ")");
}
}
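  // Illustrative sketch (not part of the original suite) of the duration semantics
  // documented above, using a hypothetical test name "example":
  //
  //   new SleepAction("example", 0, 0);        // maxDuration <= 0: DN0 sleeps forever
  //   new SleepAction("example", 1, 3000);     // DN1 sleeps for about 3 seconds
  //   new SleepAction("example", 2, 0, 1000);  // DN2 sleeps a random time in [0, 1000) ms
  //
  // The single-duration constructor maps a duration d > 0 to the range [d, d+1),
  // so the sleep is effectively exactly d milliseconds.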
/**
* When the count is zero,
   * sleep for some period of time so that it slows down the datanode
   * or sleep forever so that the datanode becomes unresponsive.
*/
public static class CountdownSleepAction extends SleepAction {
private final CountdownConstraint countdown;
/**
* Create an action for datanode i in the pipeline.
* @param duration In milliseconds, duration <= 0 means sleeping forever.
*/
public CountdownSleepAction(String currentTest, int i,
long duration, int count) {
this(currentTest, i, duration, duration+1, count);
}
/** Create an action for datanode i in the pipeline with count down. */
public CountdownSleepAction(String currentTest, int i,
long minDuration, long maxDuration, int count) {
super(currentTest, i, minDuration, maxDuration);
countdown = new CountdownConstraint(count);
}
@Override
public void run(DatanodeID id) {
final DataTransferTest test = getDataTransferTest();
if (test.isNotSuccessAndLastPipelineContains(index, id)
&& countdown.isSatisfied()) {
final String s = toString(id) + ", duration = ["
+ minDuration + "," + maxDuration + ")";
FiTestUtil.LOG.info(s);
if (maxDuration <= 1) {
for(; FiTestUtil.sleep(1000); ); //sleep forever until interrupt
} else {
FiTestUtil.sleep(minDuration, maxDuration);
}
}
}
}
/** Action for pipeline error verification */
public static class VerificationAction implements
Action<Integer, RuntimeException> {
/** The name of the test */
final String currentTest;
/** The error index of the datanode */
final int errorIndex;
/**
* Create a verification action for errors at datanode i in the pipeline.
*
* @param currentTest The name of the test
* @param i The error index of the datanode
*/
public VerificationAction(String currentTest, int i) {
this.currentTest = currentTest;
this.errorIndex = i;
}
/** {@inheritDoc} */
public String toString() {
return currentTest + ", errorIndex=" + errorIndex;
}
@Override
public void run(Integer i) {
if (i == errorIndex) {
FiTestUtil.LOG.info(this + ", successfully verified.");
getDataTransferTest().markSuccess();
}
}
}
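  // Illustrative sketch (not part of the original suite): a VerificationAction is
  // typically paired with a fault action so that a test can assert that the failure
  // was observed at the expected datanode. Roughly, for a hypothetical test name:
  //
  //   final DataTransferTest t = (DataTransferTest)DataTransferTestUtil.initTest();
  //   t.fiReceiverOpWriteBlock.set(new OomAction("example", 1));                // fault at DN1
  //   t.fiPipelineInitErrorNonAppend.set(new VerificationAction("example", 1)); // expect index 1
  //   // ... perform the write; t.isSuccess() becomes true only if the
  //   // VerificationAction is invoked with errorIndex 1 and calls markSuccess().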
/**
* Create a OomAction with a CountdownConstraint
* so that it throws OutOfMemoryError if the count is zero.
*/
public static ConstraintSatisfactionAction<DatanodeID, IOException>
createCountdownOomAction(
String currentTest, int i, int count) {
return new ConstraintSatisfactionAction<DatanodeID, IOException>(
new OomAction(currentTest, i), new CountdownConstraint(count));
}
/**
* Create a DoosAction with a CountdownConstraint
* so that it throws DiskOutOfSpaceException if the count is zero.
*/
public static ConstraintSatisfactionAction<DatanodeID, IOException>
createCountdownDoosAction(
String currentTest, int i, int count) {
return new ConstraintSatisfactionAction<DatanodeID, IOException>(
new DoosAction(currentTest, i), new CountdownConstraint(count));
}
/**
* Create a SleepAction with a CountdownConstraint
* for datanode i in the pipeline.
* When the count is zero,
   * sleep for some period of time so that it slows down the datanode
   * or sleep forever so that the datanode becomes unresponsive.
*/
public static ConstraintSatisfactionAction<DatanodeID, IOException> createCountdownSleepAction(
String currentTest, int i, long minDuration, long maxDuration, int count) {
return new ConstraintSatisfactionAction<DatanodeID, IOException>(
new SleepAction(currentTest, i, minDuration, maxDuration),
new CountdownConstraint(count));
}
/**
* Same as
* createCountdownSleepAction(currentTest, i, duration, duration+1, count).
*/
public static ConstraintSatisfactionAction<DatanodeID, IOException> createCountdownSleepAction(
String currentTest, int i, long duration, int count) {
return createCountdownSleepAction(currentTest, i, duration, duration+1,
count);
}
}
| 16,904 | 33.85567 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fi/FiHFlushTestUtil.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fi;
import java.io.IOException;
import org.apache.hadoop.fi.FiTestUtil.ActionContainer;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
/** Helper methods and actions for hflush() fault injection tests */
public class FiHFlushTestUtil extends DataTransferTestUtil {
/** {@inheritDoc} */
public static PipelineTest initTest() {
return thepipelinetest = new HFlushTest();
}
/** Disk error action for fault injection tests */
public static class DerrAction extends DataTransferTestUtil.DataNodeAction {
/**
* @param currentTest The name of the test
* @param index The index of the datanode
*/
public DerrAction(String currentTest, int index) {
super(currentTest, index);
}
/** {@inheritDoc} */
public void run(DatanodeID id) throws IOException {
final Pipeline p = getPipelineTest().getPipelineForDatanode(id);
if (p == null) {
return;
}
if (p.contains(index, id)) {
final String s = super.toString(id);
FiTestUtil.LOG.info(s);
throw new DiskErrorException(s);
}
}
}
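  // Illustrative sketch (not part of the original code): wiring a DerrAction into an
  // HFlushTest, roughly as the hflush fault injection tests do; the test name below
  // is only an example:
  //
  //   final HFlushTest hft = (HFlushTest) FiHFlushTestUtil.initTest();
  //   hft.fiCallHFlush.set(new DerrAction("example", 0));   // disk error at DN0 on hflush
  //   hft.fiErrorOnCallHFlush.set(
  //       new DataTransferTestUtil.VerificationAction("example", 0));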
/** Class adds new type of action */
public static class HFlushTest extends DataTransferTest {
public final ActionContainer<DatanodeID, IOException> fiCallHFlush =
new ActionContainer<DatanodeID, IOException>();
public final ActionContainer<Integer, RuntimeException> fiErrorOnCallHFlush =
new ActionContainer<Integer, RuntimeException>();
}
}
| 2,373 | 34.969697 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiHftp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.File;
import java.io.IOException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.FSDataset;
import org.apache.hadoop.io.IOUtils;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
public class TestFiHftp {
final Log LOG = FileSystem.LOG;
{
((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
}
static final short DATANODE_NUM = 1;
static final Random ran = new Random();
static final byte[] buffer = new byte[1 << 16];
static final MessageDigest md5;
static {
try {
md5 = MessageDigest.getInstance("MD5");
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
}
private static byte[] createFile(FileSystem fs, Path name, long length,
short replication, long blocksize) throws IOException {
final FSDataOutputStream out = fs.create(name, false, 4096,
replication, blocksize);
try {
for(long n = length; n > 0; ) {
ran.nextBytes(buffer);
final int w = n < buffer.length? (int)n: buffer.length;
out.write(buffer, 0, w);
md5.update(buffer, 0, w);
n -= w;
}
} finally {
IOUtils.closeStream(out);
}
return md5.digest();
}
@Test
public void testHftpOpen() throws IOException {
final Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
cluster.waitActive();
//test with a file
//which is larger than the servlet response buffer size
{
final long blocksize = 1L << 20; //
final long filesize = 2*blocksize + 100;
runTestHftpOpen(cluster, "/foo", blocksize, filesize);
}
//test with a small file
//which is smaller than the servlet response buffer size
{
final long blocksize = 1L << 10; //
final long filesize = 2*blocksize + 100;
runTestHftpOpen(cluster, "/small", blocksize, filesize);
}
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
/**
* A test with a 3GB file.
* It may take ~6 minutes.
*/
void largeFileTest(final MiniDFSCluster cluster) throws IOException {
final long blocksize = 128L << 20;
final long filesize = 3L << 30;
runTestHftpOpen(cluster, "/large", blocksize, filesize);
}
/**
* @param blocksize
* @param filesize must be > block size
*/
private void runTestHftpOpen(final MiniDFSCluster cluster, final String file,
final long blocksize, final long filesize) throws IOException {
//create a file
final DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
final Path filepath = new Path(file);
final byte[] filemd5 = createFile(dfs, filepath, filesize, DATANODE_NUM,
blocksize);
DFSTestUtil.waitReplication(dfs, filepath, DATANODE_NUM);
//test hftp open and read
final HftpFileSystem hftpfs = cluster.getHftpFileSystem(0);
{
final FSDataInputStream in = hftpfs.open(filepath);
long bytesRead = 0;
try {
for(int r; (r = in.read(buffer)) != -1; ) {
bytesRead += r;
md5.update(buffer, 0, r);
}
} finally {
LOG.info("bytesRead=" + bytesRead);
in.close();
}
Assert.assertEquals(filesize, bytesRead);
Assert.assertArrayEquals(filemd5, md5.digest());
}
//delete the second block
final DFSClient client = dfs.getClient();
final LocatedBlocks locatedblocks = client.getNamenode().getBlockLocations(
file, 0, filesize);
Assert.assertEquals((filesize - 1)/blocksize + 1,
locatedblocks.locatedBlockCount());
final LocatedBlock lb = locatedblocks.get(1);
final ExtendedBlock blk = lb.getBlock();
Assert.assertEquals(blocksize, lb.getBlockSize());
final DatanodeInfo[] datanodeinfos = lb.getLocations();
Assert.assertEquals(DATANODE_NUM, datanodeinfos.length);
final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
final FSDataset data = (FSDataset)dn.getFSDataset();
final File blkfile = data.getBlockFile(blk);
Assert.assertTrue(blkfile.delete());
//read again by hftp, should get an exception
LOG.info("hftpfs.getUri() = " + hftpfs.getUri());
final ContentSummary cs = hftpfs.getContentSummary(filepath);
LOG.info("hftpfs.getContentSummary = " + cs);
Assert.assertEquals(filesize, cs.getLength());
final FSDataInputStream in = hftpfs.open(hftpfs.makeQualified(filepath));
long bytesRead = 0;
try {
for(int r; (r = in.read(buffer)) != -1; ) {
bytesRead += r;
}
Assert.fail();
} catch(IOException ioe) {
LOG.info("GOOD: get an exception", ioe);
} finally {
LOG.info("bytesRead=" + bytesRead);
in.close();
}
}
}
| 6,523 | 33.702128 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiHFlush.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fi.DataTransferTestUtil;
import org.apache.hadoop.fi.FiHFlushTestUtil;
import org.apache.hadoop.fi.FiHFlushTestUtil.DerrAction;
import org.apache.hadoop.fi.FiHFlushTestUtil.HFlushTest;
import org.apache.hadoop.fi.FiTestUtil;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
import java.io.IOException;
/** Class provides basic fault injection tests according to the test plan
of HDFS-265
*/
public class TestFiHFlush {
  /** Initializes a test and sets the required actions to be used later by
   * an injected advice.
   * @param conf mini cluster configuration
   * @param methodName string representation of the test method invoking this
   * method
   * @param block_size the desired size of the file's blocks
   * @param a the disk error action to inject
   * @param index the datanode index at which the error is expected
   * @param trueVerification whether to assert that the injected fault was detected
* @throws IOException in case of any errors
*/
private static void runDiskErrorTest (final Configuration conf,
final String methodName, final int block_size, DerrAction a, int index,
boolean trueVerification)
throws IOException {
FiTestUtil.LOG.info("Running " + methodName + " ...");
final HFlushTest hft = (HFlushTest) FiHFlushTestUtil.initTest();
hft.fiCallHFlush.set(a);
hft.fiErrorOnCallHFlush.set(new DataTransferTestUtil.VerificationAction(methodName, index));
TestHFlush.doTheJob(conf, methodName, block_size, (short)3);
if (trueVerification)
assertTrue("Some of expected conditions weren't detected", hft.isSuccess());
}
  /** The test calls
   * {@link #runDiskErrorTest(Configuration, String, int, DerrAction, int, boolean)}
   * to make a number of writes within a block's boundaries.
   * Although hflush() is called, the test shouldn't expect an IOException
   * in this case because the invocation happens after the write() call
   * completes, when the pipeline doesn't exist anymore.
   * Thus, the injected fault won't be triggered for the 0th datanode.
*/
@Test
public void hFlushFi01_a() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runDiskErrorTest(new HdfsConfiguration(), methodName,
AppendTestUtil.BLOCK_SIZE, new DerrAction(methodName, 0), 0, false);
}
  /** The test calls
   * {@link #runDiskErrorTest(Configuration, String, int, DerrAction, int, boolean)}
   * to make a number of writes across block boundaries.
   * hflush() is called after each write() during the pipeline's lifetime.
   * Thus, the injected fault ought to be triggered for the 0th datanode.
*/
@Test
public void hFlushFi01_b() throws IOException {
final String methodName = FiTestUtil.getMethodName();
Configuration conf = new HdfsConfiguration();
int customPerChecksumSize = 512;
int customBlockSize = customPerChecksumSize * 3;
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
runDiskErrorTest(conf, methodName,
customBlockSize, new DerrAction(methodName, 0), 0, true);
}
/** Similar to {@link #hFlushFi01_b()} but writing happens
   * across block and checksum boundaries
*/
@Test
public void hFlushFi01_c() throws Exception {
final String methodName = FiTestUtil.getMethodName();
Configuration conf = new HdfsConfiguration();
int customPerChecksumSize = 400;
int customBlockSize = customPerChecksumSize * 3;
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
runDiskErrorTest(conf, methodName,
customBlockSize, new DerrAction(methodName, 0), 0, true);
}
/** Similar to {@link #hFlushFi01_a()} but for a pipeline's 1st datanode
*/
@Test
public void hFlushFi02_a() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runDiskErrorTest(new HdfsConfiguration(), methodName,
AppendTestUtil.BLOCK_SIZE, new DerrAction(methodName, 1), 1, false);
}
/** Similar to {@link #hFlushFi01_b()} but for a pipeline's 1st datanode
*/
@Test
public void hFlushFi02_b() throws IOException {
final String methodName = FiTestUtil.getMethodName();
Configuration conf = new HdfsConfiguration();
int customPerChecksumSize = 512;
int customBlockSize = customPerChecksumSize * 3;
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
runDiskErrorTest(conf, methodName,
customBlockSize, new DerrAction(methodName, 1), 1, true);
}
/** Similar to {@link #hFlushFi01_c()} but for a pipeline's 1st datanode
*/
@Test
public void hFlushFi02_c() throws IOException {
final String methodName = FiTestUtil.getMethodName();
Configuration conf = new HdfsConfiguration();
int customPerChecksumSize = 400;
int customBlockSize = customPerChecksumSize * 3;
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
runDiskErrorTest(conf, methodName,
customBlockSize, new DerrAction(methodName, 1), 1, true);
}
/** Similar to {@link #hFlushFi01_a()} but for a pipeline's 2nd datanode
*/
@Test
public void hFlushFi03_a() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runDiskErrorTest(new HdfsConfiguration(), methodName,
AppendTestUtil.BLOCK_SIZE, new DerrAction(methodName, 2), 2, false);
}
/** Similar to {@link #hFlushFi01_b()} but for a pipeline's 2nd datanode
*/
@Test
public void hFlushFi03_b() throws IOException {
final String methodName = FiTestUtil.getMethodName();
Configuration conf = new HdfsConfiguration();
int customPerChecksumSize = 512;
int customBlockSize = customPerChecksumSize * 3;
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
runDiskErrorTest(conf, methodName,
customBlockSize, new DerrAction(methodName, 2), 2, true);
}
/** Similar to {@link #hFlushFi01_c()} but for a pipeline's 2nd datanode
*/
@Test
public void hFlushFi03_c() throws IOException {
final String methodName = FiTestUtil.getMethodName();
Configuration conf = new HdfsConfiguration();
int customPerChecksumSize = 400;
int customBlockSize = customPerChecksumSize * 3;
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
runDiskErrorTest(conf, methodName,
customBlockSize, new DerrAction(methodName, 2), 2, true);
}
}
| 7,607 | 41.033149 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fi.FiTestUtil;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestFiPipelines {
public static final Log LOG = LogFactory.getLog(TestFiPipelines.class);
private static short REPL_FACTOR = 3;
private static final int RAND_LIMIT = 2000;
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private static Configuration conf;
Random rand = new Random(RAND_LIMIT);
static {
initLoggers();
setConfiguration();
}
@Before
public void startUpCluster() throws IOException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
fs = (DistributedFileSystem) cluster.getFileSystem();
}
@After
synchronized public void shutDownCluster() throws IOException {
if (cluster != null) cluster.shutdown();
}
/**
   * The test initiates and sets actions created by the injection framework. The
   * actions exercise both aspects of sending acknowledgment packets in a pipeline.
   * It creates and closes a file whose length is smaller than the packet size.
   * The injected actions check whether the number of visible bytes at the
   * datanodes equals the number of acknowledged bytes.
*
* @throws IOException in case of an error
*/
@Test
public void pipeline_04() throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + METHOD_NAME);
}
final PipelinesTestUtil.PipelinesTest pipst =
(PipelinesTestUtil.PipelinesTest) PipelinesTestUtil.initTest();
pipst.fiCallSetNumBytes.set(new PipelinesTestUtil.ReceivedCheckAction(METHOD_NAME));
pipst.fiCallSetBytesAcked.set(new PipelinesTestUtil.AckedCheckAction(METHOD_NAME));
Path filePath = new Path("/" + METHOD_NAME + ".dat");
FSDataOutputStream fsOut = fs.create(filePath);
TestPipelines.writeData(fsOut, 2);
fs.close();
}
/**
* Similar to pipeline_04 but sends many packets into a pipeline
* @throws IOException in case of an error
*/
@Test
public void pipeline_05() throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + METHOD_NAME);
}
final PipelinesTestUtil.PipelinesTest pipst =
(PipelinesTestUtil.PipelinesTest) PipelinesTestUtil.initTest();
pipst.fiCallSetNumBytes.set(new PipelinesTestUtil.ReceivedCheckAction(METHOD_NAME));
pipst.fiCallSetBytesAcked.set(new PipelinesTestUtil.AckedCheckAction(METHOD_NAME));
Path filePath = new Path("/" + METHOD_NAME + ".dat");
FSDataOutputStream fsOut = fs.create(filePath);
for (int i = 0; i < 17; i++) {
TestPipelines.writeData(fsOut, 23);
}
fs.close();
}
/**
   * This rather tricky test suppresses acknowledgement packets from a datanode.
   * This should block any further write attempts once the ackQueue is full.
   * The test is blocking, so the MiniDFSCluster has to be killed harshly.
* @throws IOException in case of an error
*/
@Test
public void pipeline_06() throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
final int MAX_PACKETS = 80;
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + METHOD_NAME);
}
final PipelinesTestUtil.PipelinesTest pipst =
(PipelinesTestUtil.PipelinesTest) PipelinesTestUtil.initTest();
pipst.setSuspend(true); // This is ack. suspend test
Path filePath = new Path("/" + METHOD_NAME + ".dat");
FSDataOutputStream fsOut = fs.create(filePath);
int cnt = 0;
try {
      // At this point let's start an external checker thread, which will
      // verify the test's results and shut down the MiniDFSCluster for us,
      // because what it is about to do has a BLOCKING effect on the datanodes.
QueueChecker cq = new QueueChecker(pipst, MAX_PACKETS);
cq.start();
      // The following value is explained by the fact that the size of a packet isn't
      // necessarily equal to the value of
      // DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY.
      // The actual logic is expressed in DFSClient#computePacketChunkSize.
int bytesToSend = 700;
while (cnt < 100 && pipst.getSuspend()) {
if(LOG.isDebugEnabled()) {
LOG.debug("_06(): " + cnt++ + " sending another " +
bytesToSend + " bytes");
}
TestPipelines.writeData(fsOut, bytesToSend);
}
} catch (Exception e) {
LOG.warn("Getting unexpected exception: ", e);
}
if(LOG.isDebugEnabled()) {
LOG.debug("Last queued packet number " + pipst.getLastQueued());
}
    assertTrue("Shouldn't be able to send more than 81 packets", pipst.getLastQueued() <= 81);
}
private class QueueChecker extends Thread {
PipelinesTestUtil.PipelinesTest test;
final int MAX;
boolean done = false;
public QueueChecker(PipelinesTestUtil.PipelinesTest handle, int maxPackets) {
test = handle;
MAX = maxPackets;
}
@Override
public void run() {
while (!done) {
if(LOG.isDebugEnabled()) {
LOG.debug("_06: checking for the limit " + test.getLastQueued() +
" and " + MAX);
}
if (test.getLastQueued() >= MAX) {
if(LOG.isDebugEnabled()) {
LOG.debug("FI: Resume packets acking");
}
test.setSuspend(false); //Do not suspend ack sending any more
done = true;
}
if (!done)
try {
if(LOG.isDebugEnabled()) {
LOG.debug("_06: MAX isn't reached yet. Current=" +
test.getLastQueued());
}
sleep(100);
} catch (InterruptedException e) { }
}
      assertTrue("Shouldn't be able to send more than 81 packets", test.getLastQueued() <= 81);
try {
if(LOG.isDebugEnabled()) {
LOG.debug("_06: shutting down the cluster");
}
        // It has to be done like that, because the local version of shutDownCluster()
        // won't work: it tries to close an instance of FileSystem too, and that is
        // where the waiting is happening.
if (cluster !=null )
shutDownCluster();
} catch (Exception e) {
e.printStackTrace();
}
if(LOG.isDebugEnabled()) {
LOG.debug("End QueueChecker thread");
}
}
}
private static void setConfiguration() {
conf = new Configuration();
int customPerChecksumSize = 700;
int customBlockSize = customPerChecksumSize * 3;
conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 100);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, customBlockSize / 2);
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 0);
}
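  // A quick sketch (added note) of the values derived above: customPerChecksumSize
  // = 700 bytes, so customBlockSize = 3 * 700 = 2100 bytes and the effective client
  // write packet size is 2100 / 2 = 1050 bytes (the second setInt of
  // DFS_CLIENT_WRITE_PACKET_SIZE_KEY overrides the first). The actual on-the-wire
  // packet size may still differ, as noted in pipeline_06, because of
  // DFSClient#computePacketChunkSize.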
private static void initLoggers() {
((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) TestFiPipelines.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) FiTestUtil.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) BlockReceiverAspects.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) DFSClientAspects.LOG).getLogger().setLevel(Level.ALL);
}
}
| 9,070 | 35.576613 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.LinkedList;
import org.apache.hadoop.fi.DataTransferTestUtil;
import org.apache.hadoop.fi.FiTestUtil;
import org.apache.hadoop.fi.PipelineTest;
import org.apache.hadoop.fi.FiTestUtil.ActionContainer;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
public class PipelinesTestUtil extends DataTransferTestUtil {
/**
* {@inheritDoc}
*/
public static PipelineTest initTest() {
return thepipelinetest = new PipelinesTest();
}
/**
   * Action that records the number of received bytes, for fault injection tests.
*/
public static class ReceivedCheckAction implements FiTestUtil.Action<NodeBytes, IOException> {
String name;
LinkedList<NodeBytes> rcv = ((PipelinesTest) getPipelineTest()).received;
LinkedList<NodeBytes> ack = ((PipelinesTest) getPipelineTest()).acked;
/**
* @param name of the test
*/
public ReceivedCheckAction(String name) {
this.name = name;
}
@Override
public void run(NodeBytes nb) throws IOException {
synchronized (rcv) {
rcv.add(nb);
for (NodeBytes n : rcv) {
long counterPartsBytes = -1;
NodeBytes counterPart = null;
if (ack.size() > rcv.indexOf(n)) {
counterPart = ack.get(rcv.indexOf(n));
counterPartsBytes = counterPart.bytes;
}
assertTrue("FI: Wrong receiving length",
counterPartsBytes <= n.bytes);
if(FiTestUtil.LOG.isDebugEnabled()) {
FiTestUtil.LOG.debug("FI: before compare of Recv bytes. Expected "
+ n.bytes + ", got " + counterPartsBytes);
}
}
}
}
}
/**
   * Action that records the number of acknowledged bytes, for fault injection tests.
*/
public static class AckedCheckAction implements FiTestUtil.Action<NodeBytes, IOException> {
String name;
LinkedList<NodeBytes> rcv = ((PipelinesTest) getPipelineTest()).received;
LinkedList<NodeBytes> ack = ((PipelinesTest) getPipelineTest()).acked;
/**
* @param name of the test
*/
public AckedCheckAction(String name) {
this.name = name;
}
/**
* {@inheritDoc}
*/
public void run(NodeBytes nb) throws IOException {
synchronized (ack) {
ack.add(nb);
for (NodeBytes n : ack) {
NodeBytes counterPart = null;
long counterPartsBytes = -1;
if (rcv.size() > ack.indexOf(n)) {
counterPart = rcv.get(ack.indexOf(n));
counterPartsBytes = counterPart.bytes;
}
assertTrue("FI: Wrong acknowledged length",
counterPartsBytes == n.bytes);
if(FiTestUtil.LOG.isDebugEnabled()) {
FiTestUtil.LOG.debug(
"FI: before compare of Acked bytes. Expected " +
n.bytes + ", got " + counterPartsBytes);
}
}
}
}
}
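  /*
   * A rough sketch (not part of the original code) of what the two check actions
   * above verify. Suppose the recorded per-packet byte counts were, hypothetically:
   *
   *   received: 700, 1400, 2100   (recorded by ReceivedCheckAction)
   *   acked:    700, 1400         (recorded by AckedCheckAction)
   *
   * ReceivedCheckAction requires acked[i] <= received[i] for every index i present
   * in both lists (an acknowledgment may never run ahead of what was received),
   * while AckedCheckAction requires acked[i] == received[i] (what is acknowledged
   * must match exactly what was received).
   */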
/**
* Class adds new types of action
*/
public static class PipelinesTest extends DataTransferTest {
LinkedList<NodeBytes> received = new LinkedList<NodeBytes>();
LinkedList<NodeBytes> acked = new LinkedList<NodeBytes>();
public final ActionContainer<NodeBytes, IOException> fiCallSetNumBytes =
new ActionContainer<NodeBytes, IOException>();
public final ActionContainer<NodeBytes, IOException> fiCallSetBytesAcked =
new ActionContainer<NodeBytes, IOException>();
private static boolean suspend = false;
private static long lastQueuedPacket = -1;
public void setSuspend(boolean flag) {
suspend = flag;
}
public boolean getSuspend () {
return suspend;
}
public void setVerified(long packetNum) {
PipelinesTest.lastQueuedPacket = packetNum;
}
public long getLastQueued() {
return lastQueuedPacket;
}
}
public static class NodeBytes {
DatanodeID id;
long bytes;
public NodeBytes(DatanodeID id, long bytes) {
this.id = id;
this.bytes = bytes;
}
}
}
| 4,870 | 30.62987 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiPipelineClose.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import org.apache.hadoop.fi.DataTransferTestUtil;
import org.apache.hadoop.fi.DataTransferTestUtil.DataNodeAction;
import org.apache.hadoop.fi.DataTransferTestUtil.DataTransferTest;
import org.apache.hadoop.fi.DataTransferTestUtil.DatanodeMarkingAction;
import org.apache.hadoop.fi.DataTransferTestUtil.IoeAction;
import org.apache.hadoop.fi.DataTransferTestUtil.OomAction;
import org.apache.hadoop.fi.DataTransferTestUtil.SleepAction;
import org.apache.hadoop.fi.FiTestUtil;
import org.apache.hadoop.fi.FiTestUtil.Action;
import org.apache.hadoop.fi.FiTestUtil.ConstraintSatisfactionAction;
import org.apache.hadoop.fi.FiTestUtil.MarkerConstraint;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.junit.Test;
/** Test DataTransferProtocol with fault injection. */
public class TestFiPipelineClose {
private static void runPipelineCloseTest(String methodName,
Action<DatanodeID, IOException> a) throws IOException {
FiTestUtil.LOG.info("Running " + methodName + " ...");
final DataTransferTest t = (DataTransferTest) DataTransferTestUtil
.initTest();
t.fiPipelineClose.set(a);
TestFiDataTransferProtocol.write1byte(methodName);
}
/**
* Pipeline close:
   * DN0 never responds after receiving a close request from the client.
   * The client gets an IOException and determines DN0 is bad.
*/
@Test
public void pipeline_Fi_36() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runPipelineCloseTest(methodName, new SleepAction(methodName, 0, 0));
}
/**
* Pipeline close:
   * DN1 never responds after receiving a close request from the client.
   * The client gets an IOException and determines DN1 is bad.
*/
@Test
public void pipeline_Fi_37() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runPipelineCloseTest(methodName, new SleepAction(methodName, 1, 0));
}
/**
* Pipeline close:
   * DN2 never responds after receiving a close request from the client.
   * The client gets an IOException and determines DN2 is bad.
*/
@Test
public void pipeline_Fi_38() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runPipelineCloseTest(methodName, new SleepAction(methodName, 2, 0));
}
private static void run41_43(String name, int i) throws IOException {
runPipelineCloseTest(name, new SleepAction(name, i, 3000));
}
private static void runPipelineCloseAck(String name, int i, DataNodeAction a
) throws IOException {
FiTestUtil.LOG.info("Running " + name + " ...");
final DataTransferTest t = (DataTransferTest)DataTransferTestUtil.initTest();
final MarkerConstraint marker = new MarkerConstraint(name);
t.fiPipelineClose.set(new DatanodeMarkingAction(name, i, marker));
t.fiPipelineAck.set(new ConstraintSatisfactionAction<DatanodeID, IOException>(a, marker));
TestFiDataTransferProtocol.write1byte(name);
}
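  // A brief note (added for clarity, not in the original): runPipelineCloseAck
  // chains two hooks through a MarkerConstraint. The DatanodeMarkingAction set on
  // fiPipelineClose marks the constraint when the close request reaches datanode i,
  // and the ack-side action is wrapped in a ConstraintSatisfactionAction gated on
  // that marker, so the injected fault on fiPipelineAck can fire only after the
  // close has been observed at that datanode.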
private static void run39_40(String name, int i) throws IOException {
runPipelineCloseAck(name, i, new SleepAction(name, i, 0));
}
/**
* Pipeline close:
   * DN1 never responds after receiving a close ack from DN2.
   * The client gets an IOException and determines DN1 is bad.
*/
@Test
public void pipeline_Fi_39() throws IOException {
run39_40(FiTestUtil.getMethodName(), 1);
}
/**
* Pipeline close:
   * DN0 never responds after receiving a close ack from DN1.
   * The client gets an IOException and determines DN0 is bad.
*/
@Test
public void pipeline_Fi_40() throws IOException {
run39_40(FiTestUtil.getMethodName(), 0);
}
/**
* Pipeline close with DN0 very slow but it won't lead to timeout.
* Client finishes close successfully.
*/
@Test
public void pipeline_Fi_41() throws IOException {
run41_43(FiTestUtil.getMethodName(), 0);
}
/**
* Pipeline close with DN1 very slow but it won't lead to timeout.
* Client finishes close successfully.
*/
@Test
public void pipeline_Fi_42() throws IOException {
run41_43(FiTestUtil.getMethodName(), 1);
}
/**
* Pipeline close with DN2 very slow but it won't lead to timeout.
* Client finishes close successfully.
*/
@Test
public void pipeline_Fi_43() throws IOException {
run41_43(FiTestUtil.getMethodName(), 2);
}
/**
* Pipeline close:
* DN0 throws an OutOfMemoryException
   * right after it receives a close request from the client.
   * The client gets an IOException and determines DN0 is bad.
*/
@Test
public void pipeline_Fi_44() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runPipelineCloseTest(methodName, new OomAction(methodName, 0));
}
/**
* Pipeline close:
* DN1 throws an OutOfMemoryException
   * right after it receives a close request from the client.
   * The client gets an IOException and determines DN1 is bad.
*/
@Test
public void pipeline_Fi_45() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runPipelineCloseTest(methodName, new OomAction(methodName, 1));
}
/**
* Pipeline close:
* DN2 throws an OutOfMemoryException
   * right after it receives a close request from the client.
   * The client gets an IOException and determines DN2 is bad.
*/
@Test
public void pipeline_Fi_46() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runPipelineCloseTest(methodName, new OomAction(methodName, 2));
}
private static void run47_48(String name, int i) throws IOException {
runPipelineCloseAck(name, i, new OomAction(name, i));
}
/**
* Pipeline close:
* DN1 throws an OutOfMemoryException right after
   * it receives a close ack from DN2.
   * The client gets an IOException and determines DN1 is bad.
*/
@Test
public void pipeline_Fi_47() throws IOException {
run47_48(FiTestUtil.getMethodName(), 1);
}
/**
* Pipeline close:
* DN0 throws an OutOfMemoryException right after
   * it receives a close ack from DN1.
   * The client gets an IOException and determines DN0 is bad.
*/
@Test
public void pipeline_Fi_48() throws IOException {
run47_48(FiTestUtil.getMethodName(), 0);
}
private static void runBlockFileCloseTest(String methodName,
Action<DatanodeID, IOException> a) throws IOException {
FiTestUtil.LOG.info("Running " + methodName + " ...");
final DataTransferTest t = (DataTransferTest) DataTransferTestUtil
.initTest();
t.fiBlockFileClose.set(a);
TestFiDataTransferProtocol.write1byte(methodName);
}
private static void run49_51(String name, int i) throws IOException {
runBlockFileCloseTest(name, new IoeAction(name, i, "DISK ERROR"));
}
/**
* Pipeline close:
* DN0 throws a disk error exception when it is closing the block file.
   * The client gets an IOException and determines DN0 is bad.
*/
@Test
public void pipeline_Fi_49() throws IOException {
run49_51(FiTestUtil.getMethodName(), 0);
}
/**
* Pipeline close:
* DN1 throws a disk error exception when it is closing the block file.
   * The client gets an IOException and determines DN1 is bad.
*/
@Test
public void pipeline_Fi_50() throws IOException {
run49_51(FiTestUtil.getMethodName(), 1);
}
/**
* Pipeline close:
* DN2 throws a disk error exception when it is closing the block file.
   * The client gets an IOException and determines DN2 is bad.
*/
@Test
public void pipeline_Fi_51() throws IOException {
run49_51(FiTestUtil.getMethodName(), 2);
}
}
| 8,305 | 32.091633 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fi.DataTransferTestUtil;
import org.apache.hadoop.fi.DataTransferTestUtil.DataTransferTest;
import org.apache.hadoop.fi.DataTransferTestUtil.DoosAction;
import org.apache.hadoop.fi.DataTransferTestUtil.OomAction;
import org.apache.hadoop.fi.DataTransferTestUtil.SleepAction;
import org.apache.hadoop.fi.DataTransferTestUtil.VerificationAction;
import org.apache.hadoop.fi.FiTestUtil;
import org.apache.hadoop.fi.FiTestUtil.Action;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
/** Test DataTransferProtocol with fault injection. */
public class TestFiDataTransferProtocol {
static final short REPLICATION = 3;
static final long BLOCKSIZE = 1L * (1L << 20);
static final Configuration conf = new HdfsConfiguration();
static {
conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, REPLICATION);
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
}
static private FSDataOutputStream createFile(FileSystem fs, Path p
) throws IOException {
return fs.create(p, true,
fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY,
4096), REPLICATION, BLOCKSIZE);
}
{
((Log4JLogger)DataTransferProtocol.LOG).getLogger().setLevel(Level.ALL);
}
/**
* 1. create files with dfs
* 2. write 1 byte
* 3. close file
* 4. open the same file
* 5. read the 1 byte and compare results
*/
static void write1byte(String methodName) throws IOException {
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
).numDataNodes(REPLICATION + 1).build();
final FileSystem dfs = cluster.getFileSystem();
try {
final Path p = new Path("/" + methodName + "/foo");
final FSDataOutputStream out = createFile(dfs, p);
out.write(1);
out.close();
final FSDataInputStream in = dfs.open(p);
final int b = in.read();
in.close();
Assert.assertEquals(1, b);
}
finally {
dfs.close();
cluster.shutdown();
}
}
private static void runSlowDatanodeTest(String methodName, SleepAction a
) throws IOException {
FiTestUtil.LOG.info("Running " + methodName + " ...");
final DataTransferTest t = (DataTransferTest)DataTransferTestUtil.initTest();
t.fiCallReceivePacket.set(a);
t.fiReceiverOpWriteBlock.set(a);
t.fiStatusRead.set(a);
write1byte(methodName);
}
private static void runReceiverOpWriteBlockTest(String methodName,
int errorIndex, Action<DatanodeID, IOException> a) throws IOException {
FiTestUtil.LOG.info("Running " + methodName + " ...");
final DataTransferTest t = (DataTransferTest) DataTransferTestUtil
.initTest();
t.fiReceiverOpWriteBlock.set(a);
t.fiPipelineInitErrorNonAppend.set(new VerificationAction(methodName,
errorIndex));
write1byte(methodName);
Assert.assertTrue(t.isSuccess());
}
private static void runStatusReadTest(String methodName, int errorIndex,
Action<DatanodeID, IOException> a) throws IOException {
FiTestUtil.LOG.info("Running " + methodName + " ...");
final DataTransferTest t = (DataTransferTest) DataTransferTestUtil
.initTest();
t.fiStatusRead.set(a);
t.fiPipelineInitErrorNonAppend.set(new VerificationAction(methodName,
errorIndex));
write1byte(methodName);
Assert.assertTrue(t.isSuccess());
}
private static void runCallWritePacketToDisk(String methodName,
int errorIndex, Action<DatanodeID, IOException> a) throws IOException {
FiTestUtil.LOG.info("Running " + methodName + " ...");
final DataTransferTest t = (DataTransferTest)DataTransferTestUtil.initTest();
t.fiCallWritePacketToDisk.set(a);
t.fiPipelineErrorAfterInit.set(new VerificationAction(methodName, errorIndex));
write1byte(methodName);
Assert.assertTrue(t.isSuccess());
}
/**
* Pipeline setup:
   * DN0 never responds after receiving a setup request from the client.
   * The client gets an IOException and determines DN0 is bad.
*/
@Test
public void pipeline_Fi_01() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runReceiverOpWriteBlockTest(methodName, 0, new SleepAction(methodName, 0, 0));
}
/**
* Pipeline setup:
   * DN1 never responds after receiving a setup request from the client.
   * The client gets an IOException and determines DN1 is bad.
*/
@Test
public void pipeline_Fi_02() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runReceiverOpWriteBlockTest(methodName, 1, new SleepAction(methodName, 1, 0));
}
/**
* Pipeline setup:
   * DN2 never responds after receiving a setup request from the client.
   * The client gets an IOException and determines DN2 is bad.
*/
@Test
public void pipeline_Fi_03() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runReceiverOpWriteBlockTest(methodName, 2, new SleepAction(methodName, 2, 0));
}
/**
   * Pipeline setup, DN1 never responds after receiving a setup ack from DN2.
   * The client gets an IOException and determines DN1 is bad.
*/
@Test
public void pipeline_Fi_04() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runStatusReadTest(methodName, 1, new SleepAction(methodName, 1, 0));
}
/**
   * Pipeline setup, DN0 never responds after receiving a setup ack from DN1.
   * The client gets an IOException and determines DN0 is bad.
*/
@Test
public void pipeline_Fi_05() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runStatusReadTest(methodName, 0, new SleepAction(methodName, 0, 0));
}
/**
* Pipeline setup with DN0 very slow but it won't lead to timeout.
* Client finishes setup successfully.
*/
@Test
public void pipeline_Fi_06() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runSlowDatanodeTest(methodName, new SleepAction(methodName, 0, 3000));
}
/**
* Pipeline setup with DN1 very slow but it won't lead to timeout.
* Client finishes setup successfully.
*/
@Test
public void pipeline_Fi_07() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runSlowDatanodeTest(methodName, new SleepAction(methodName, 1, 3000));
}
/**
* Pipeline setup with DN2 very slow but it won't lead to timeout.
* Client finishes setup successfully.
*/
@Test
public void pipeline_Fi_08() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runSlowDatanodeTest(methodName, new SleepAction(methodName, 2, 3000));
}
/**
* Pipeline setup, DN0 throws an OutOfMemoryException right after it
   * receives a setup request from the client.
   * The client gets an IOException and determines DN0 is bad.
*/
@Test
public void pipeline_Fi_09() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runReceiverOpWriteBlockTest(methodName, 0, new OomAction(methodName, 0));
}
/**
* Pipeline setup, DN1 throws an OutOfMemoryException right after it
   * receives a setup request from DN0.
   * The client gets an IOException and determines DN1 is bad.
*/
@Test
public void pipeline_Fi_10() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runReceiverOpWriteBlockTest(methodName, 1, new OomAction(methodName, 1));
}
/**
* Pipeline setup, DN2 throws an OutOfMemoryException right after it
   * receives a setup request from DN1.
   * The client gets an IOException and determines DN2 is bad.
*/
@Test
public void pipeline_Fi_11() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runReceiverOpWriteBlockTest(methodName, 2, new OomAction(methodName, 2));
}
/**
* Pipeline setup, DN1 throws an OutOfMemoryException right after it
   * receives a setup ack from DN2.
   * The client gets an IOException and determines DN1 is bad.
*/
@Test
public void pipeline_Fi_12() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runStatusReadTest(methodName, 1, new OomAction(methodName, 1));
}
/**
* Pipeline setup, DN0 throws an OutOfMemoryException right after it
   * receives a setup ack from DN1.
   * The client gets an IOException and determines DN0 is bad.
*/
@Test
public void pipeline_Fi_13() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runStatusReadTest(methodName, 0, new OomAction(methodName, 0));
}
/**
* Streaming: Write a packet, DN0 throws a DiskOutOfSpaceError
* when it writes the data to disk.
   * The client gets an IOException and determines DN0 is bad.
*/
@Test
public void pipeline_Fi_14() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runCallWritePacketToDisk(methodName, 0, new DoosAction(methodName, 0));
}
/**
* Streaming: Write a packet, DN1 throws a DiskOutOfSpaceError
* when it writes the data to disk.
   * The client gets an IOException and determines DN1 is bad.
*/
@Test
public void pipeline_Fi_15() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runCallWritePacketToDisk(methodName, 1, new DoosAction(methodName, 1));
}
/**
* Streaming: Write a packet, DN2 throws a DiskOutOfSpaceError
* when it writes the data to disk.
   * The client gets an IOException and determines DN2 is bad.
*/
@Test
public void pipeline_Fi_16() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runCallWritePacketToDisk(methodName, 2, new DoosAction(methodName, 2));
}
}
| 11,123 | 34.653846 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import java.util.Random;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fi.DataTransferTestUtil;
import org.apache.hadoop.fi.DataTransferTestUtil.CountdownDoosAction;
import org.apache.hadoop.fi.DataTransferTestUtil.CountdownOomAction;
import org.apache.hadoop.fi.DataTransferTestUtil.CountdownSleepAction;
import org.apache.hadoop.fi.DataTransferTestUtil.DataTransferTest;
import org.apache.hadoop.fi.DataTransferTestUtil.SleepAction;
import org.apache.hadoop.fi.DataTransferTestUtil.VerificationAction;
import org.apache.hadoop.fi.FiTestUtil;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
/** Test DataTransferProtocol with fault injection. */
public class TestFiDataTransferProtocol2 {
static final short REPLICATION = 3;
static final long BLOCKSIZE = 1L * (1L << 20);
static final int PACKET_SIZE = 1024;
static final int MIN_N_PACKET = 3;
static final int MAX_N_PACKET = 10;
static final int MAX_SLEEP = 1000;
static final Configuration conf = new Configuration();
static {
conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, REPLICATION);
conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE);
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
}
static final byte[] bytes = new byte[MAX_N_PACKET * PACKET_SIZE];
static final byte[] toRead = new byte[MAX_N_PACKET * PACKET_SIZE];
static private FSDataOutputStream createFile(FileSystem fs, Path p
) throws IOException {
return fs.create(p, true, fs.getConf()
.getInt(IO_FILE_BUFFER_SIZE_KEY, 4096), REPLICATION, BLOCKSIZE);
}
{
((Log4JLogger) BlockReceiver.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)DataTransferProtocol.LOG).getLogger().setLevel(Level.ALL);
}
/**
* 1. create files with dfs
* 2. write MIN_N_PACKET to MAX_N_PACKET packets
* 3. close file
* 4. open the same file
* 5. read the bytes and compare results
*/
private static void writeSeveralPackets(String methodName) throws IOException {
final Random r = FiTestUtil.RANDOM.get();
final int nPackets = FiTestUtil.nextRandomInt(MIN_N_PACKET, MAX_N_PACKET + 1);
final int lastPacketSize = FiTestUtil.nextRandomInt(1, PACKET_SIZE + 1);
final int size = (nPackets - 1)*PACKET_SIZE + lastPacketSize;
FiTestUtil.LOG.info("size=" + size + ", nPackets=" + nPackets
+ ", lastPacketSize=" + lastPacketSize);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
).numDataNodes(REPLICATION + 2).build();
final FileSystem dfs = cluster.getFileSystem();
try {
final Path p = new Path("/" + methodName + "/foo");
final FSDataOutputStream out = createFile(dfs, p);
final long seed = r.nextLong();
final Random ran = new Random(seed);
ran.nextBytes(bytes);
out.write(bytes, 0, size);
out.close();
final FSDataInputStream in = dfs.open(p);
int totalRead = 0;
int nRead = 0;
while ((nRead = in.read(toRead, totalRead, size - totalRead)) > 0) {
totalRead += nRead;
}
Assert.assertEquals("Cannot read file.", size, totalRead);
for (int i = 0; i < size; i++) {
Assert.assertTrue("File content differ.", bytes[i] == toRead[i]);
}
}
finally {
dfs.close();
cluster.shutdown();
}
}
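  // Worked example (illustrative only) of the size computation above: if the random
  // draw gives nPackets = 4 and lastPacketSize = 512, then
  // size = (4 - 1) * PACKET_SIZE + 512 = 3 * 1024 + 512 = 3584 bytes, i.e. three
  // full 1024-byte packets followed by one 512-byte packet.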
private static void initSlowDatanodeTest(DataTransferTest t, SleepAction a)
throws IOException {
t.fiCallReceivePacket.set(a);
t.fiReceiverOpWriteBlock.set(a);
t.fiStatusRead.set(a);
}
private void runTest17_19(String methodName, int dnIndex)
throws IOException {
FiTestUtil.LOG.info("Running " + methodName + " ...");
final DataTransferTest t = (DataTransferTest) DataTransferTestUtil
.initTest();
initSlowDatanodeTest(t, new SleepAction(methodName, 0, 0, MAX_SLEEP));
initSlowDatanodeTest(t, new SleepAction(methodName, 1, 0, MAX_SLEEP));
initSlowDatanodeTest(t, new SleepAction(methodName, 2, 0, MAX_SLEEP));
t.fiCallWritePacketToDisk.set(new CountdownDoosAction(methodName, dnIndex, 3));
t.fiPipelineErrorAfterInit.set(new VerificationAction(methodName, dnIndex));
writeSeveralPackets(methodName);
Assert.assertTrue(t.isSuccess());
}
private void runTest29_30(String methodName, int dnIndex) throws IOException {
FiTestUtil.LOG.info("Running " + methodName + " ...");
final DataTransferTest t = (DataTransferTest) DataTransferTestUtil
.initTest();
initSlowDatanodeTest(t, new SleepAction(methodName, 0, 0, MAX_SLEEP));
initSlowDatanodeTest(t, new SleepAction(methodName, 1, 0, MAX_SLEEP));
initSlowDatanodeTest(t, new SleepAction(methodName, 2, 0, MAX_SLEEP));
t.fiAfterDownstreamStatusRead.set(new CountdownOomAction(methodName, dnIndex, 3));
t.fiPipelineErrorAfterInit.set(new VerificationAction(methodName, dnIndex));
writeSeveralPackets(methodName);
Assert.assertTrue(t.isSuccess());
}
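  /**
   * Common body for pipeline_Fi_34 and 35: datanode dnIndex stops responding
   * once it has read the third ack from its downstream datanode, and the
   * client is expected to mark that datanode as bad.
   */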
private void runTest34_35(String methodName, int dnIndex) throws IOException {
FiTestUtil.LOG.info("Running " + methodName + " ...");
final DataTransferTest t = (DataTransferTest) DataTransferTestUtil
.initTest();
t.fiAfterDownstreamStatusRead.set(new CountdownSleepAction(methodName, dnIndex, 0, 3));
t.fiPipelineErrorAfterInit.set(new VerificationAction(methodName, dnIndex));
writeSeveralPackets(methodName);
Assert.assertTrue(t.isSuccess());
}
/**
* Streaming:
* Randomize datanode speed, write several packets,
* DN0 throws a DiskOutOfSpaceError when it writes the third packet to disk.
* Client gets an IOException and determines DN0 bad.
*/
@Test
public void pipeline_Fi_17() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runTest17_19(methodName, 0);
}
/**
* Streaming:
* Randomize datanode speed, write several packets,
* DN1 throws a DiskOutOfSpaceError when it writes the third packet to disk.
* Client gets an IOException and determines DN1 bad.
*/
@Test
public void pipeline_Fi_18() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runTest17_19(methodName, 1);
}
/**
* Streaming:
* Randomize datanode speed, write several packets,
* DN2 throws a DiskOutOfSpaceError when it writes the third packet to disk.
* Client gets an IOException and determines DN2 bad.
*/
@Test
public void pipeline_Fi_19() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runTest17_19(methodName, 2);
}
/**
* Streaming: Client writes several packets with DN0 very slow. Client
* finishes write successfully.
*/
@Test
public void pipeline_Fi_20() throws IOException {
final String methodName = FiTestUtil.getMethodName();
FiTestUtil.LOG.info("Running " + methodName + " ...");
final DataTransferTest t = (DataTransferTest) DataTransferTestUtil
.initTest();
initSlowDatanodeTest(t, new SleepAction(methodName, 0, MAX_SLEEP));
writeSeveralPackets(methodName);
}
/**
* Streaming: Client writes several packets with DN1 very slow. Client
* finishes write successfully.
*/
@Test
public void pipeline_Fi_21() throws IOException {
final String methodName = FiTestUtil.getMethodName();
FiTestUtil.LOG.info("Running " + methodName + " ...");
final DataTransferTest t = (DataTransferTest) DataTransferTestUtil
.initTest();
initSlowDatanodeTest(t, new SleepAction(methodName, 1, MAX_SLEEP));
writeSeveralPackets(methodName);
}
/**
* Streaming: Client writes several packets with DN2 very slow. Client
* finishes write successfully.
*/
@Test
public void pipeline_Fi_22() throws IOException {
final String methodName = FiTestUtil.getMethodName();
FiTestUtil.LOG.info("Running " + methodName + " ...");
final DataTransferTest t = (DataTransferTest) DataTransferTestUtil
.initTest();
initSlowDatanodeTest(t, new SleepAction(methodName, 2, MAX_SLEEP));
writeSeveralPackets(methodName);
}
/**
   * Streaming: Randomize datanode speed, write several packets, DN1 throws an
* OutOfMemoryException when it receives the ack of the third packet from DN2.
* Client gets an IOException and determines DN1 bad.
*/
@Test
public void pipeline_Fi_29() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runTest29_30(methodName, 1);
}
/**
   * Streaming: Randomize datanode speed, write several packets, DN0 throws an
* OutOfMemoryException when it receives the ack of the third packet from DN1.
* Client gets an IOException and determines DN0 bad.
*/
@Test
public void pipeline_Fi_30() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runTest29_30(methodName, 0);
}
/**
   * Streaming: Write several packets, DN1 never responds when it receives the
* ack of the third packet from DN2. Client gets an IOException and determines
* DN1 bad.
*/
@Test
public void pipeline_Fi_34() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runTest34_35(methodName, 1);
}
/**
   * Streaming: Write several packets, DN0 never responds when it receives the
* ack of the third packet from DN1. Client gets an IOException and determines
* DN0 bad.
*/
@Test
public void pipeline_Fi_35() throws IOException {
final String methodName = FiTestUtil.getMethodName();
runTest34_35(methodName, 0);
}
}
| 11,084 | 37.356401 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.BindException;
import java.util.Random;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestRefreshCallQueue {
private MiniDFSCluster cluster;
private Configuration config;
private FileSystem fs;
static int mockQueueConstructions;
static int mockQueuePuts;
private String callQueueConfigKey = "";
private final Random rand = new Random();
@Before
public void setUp() throws Exception {
// We want to count additional events, so we reset here
mockQueueConstructions = 0;
mockQueuePuts = 0;
int portRetries = 5;
int nnPort;
for (; portRetries > 0; --portRetries) {
// Pick a random port in the range [30000,60000).
nnPort = 30000 + rand.nextInt(30000);
config = new Configuration();
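      // The call queue implementation is resolved from the per-port key
      // "ipc.<port>.callqueue.impl", so the key must embed the NameNode port.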
callQueueConfigKey = "ipc." + nnPort + ".callqueue.impl";
config.setClass(callQueueConfigKey,
MockCallQueue.class, BlockingQueue.class);
config.set("hadoop.security.authorization", "true");
FileSystem.setDefaultUri(config, "hdfs://localhost:" + nnPort);
fs = FileSystem.get(config);
try {
cluster = new MiniDFSCluster.Builder(config).nameNodePort(nnPort).build();
cluster.waitActive();
break;
} catch (BindException be) {
// Retry with a different port number.
}
}
if (portRetries == 0) {
// Bail if we get very unlucky with our choice of ports.
fail("Failed to pick an ephemeral port for the NameNode RPC server.");
}
}
@After
public void tearDown() throws Exception {
if(cluster!=null) {
cluster.shutdown();
}
}
@SuppressWarnings("serial")
public static class MockCallQueue<E> extends LinkedBlockingQueue<E> {
public MockCallQueue(int cap, String ns, Configuration conf) {
super(cap);
mockQueueConstructions++;
}
    @Override
    public void put(E e) throws InterruptedException {
super.put(e);
mockQueuePuts++;
}
}
// Returns true if mock queue was used for put
public boolean canPutInMockQueue() throws IOException {
int putsBefore = mockQueuePuts;
fs.exists(new Path("/")); // Make an RPC call
return mockQueuePuts > putsBefore;
}
@Test
public void testRefresh() throws Exception {
assertTrue("Mock queue should have been constructed", mockQueueConstructions > 0);
assertTrue("Puts are routed through MockQueue", canPutInMockQueue());
int lastMockQueueConstructions = mockQueueConstructions;
// Replace queue with the queue specified in core-site.xml, which would be the LinkedBlockingQueue
DFSAdmin admin = new DFSAdmin(config);
String [] args = new String[]{"-refreshCallQueue"};
int exitCode = admin.run(args);
assertEquals("DFSAdmin should return 0", 0, exitCode);
assertEquals("Mock queue should have no additional constructions", lastMockQueueConstructions, mockQueueConstructions);
try {
assertFalse("Puts are routed through LBQ instead of MockQueue", canPutInMockQueue());
} catch (IOException ioe){
fail("Could not put into queue at all");
}
}
}
| 4,460 | 32.795455 | 123 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.ipc.RefreshHandler;
import org.apache.hadoop.ipc.RefreshRegistry;
import org.apache.hadoop.ipc.RefreshResponse;
import org.junit.Test;
import org.junit.Before;
import org.junit.After;
import org.junit.BeforeClass;
import org.junit.AfterClass;
import org.mockito.Mockito;
/**
* Before all tests, a MiniDFSCluster is spun up.
* Before each test, mock refresh handlers are created and registered.
* After each test, the mock handlers are unregistered.
* After all tests, the cluster is spun down.
*/
public class TestGenericRefresh {
private static MiniDFSCluster cluster;
private static Configuration config;
private static RefreshHandler firstHandler;
private static RefreshHandler secondHandler;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
config = new Configuration();
config.set("hadoop.security.authorization", "true");
FileSystem.setDefaultUri(config, "hdfs://localhost:0");
cluster = new MiniDFSCluster.Builder(config).build();
cluster.waitActive();
}
@AfterClass
public static void tearDownBeforeClass() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
@Before
public void setUp() throws Exception {
// Register Handlers, first one just sends an ok response
firstHandler = Mockito.mock(RefreshHandler.class);
Mockito.stub(firstHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
.toReturn(RefreshResponse.successResponse());
RefreshRegistry.defaultRegistry().register("firstHandler", firstHandler);
// Second handler has conditional response for testing args
secondHandler = Mockito.mock(RefreshHandler.class);
Mockito.stub(secondHandler.handleRefresh("secondHandler", new String[]{"one", "two"}))
.toReturn(new RefreshResponse(3, "three"));
Mockito.stub(secondHandler.handleRefresh("secondHandler", new String[]{"one"}))
.toReturn(new RefreshResponse(2, "two"));
RefreshRegistry.defaultRegistry().register("secondHandler", secondHandler);
}
@After
public void tearDown() throws Exception {
RefreshRegistry.defaultRegistry().unregisterAll("firstHandler");
RefreshRegistry.defaultRegistry().unregisterAll("secondHandler");
}
@Test
public void testInvalidCommand() throws Exception {
DFSAdmin admin = new DFSAdmin(config);
String [] args = new String[]{"-refresh", "nn"};
int exitCode = admin.run(args);
assertEquals("DFSAdmin should fail due to bad args", -1, exitCode);
}
@Test
public void testInvalidIdentifier() throws Exception {
DFSAdmin admin = new DFSAdmin(config);
String [] args = new String[]{"-refresh", "localhost:" +
cluster.getNameNodePort(), "unregisteredIdentity"};
int exitCode = admin.run(args);
assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode);
}
@Test
public void testValidIdentifier() throws Exception {
DFSAdmin admin = new DFSAdmin(config);
String[] args = new String[]{"-refresh",
"localhost:" + cluster.getNameNodePort(), "firstHandler"};
int exitCode = admin.run(args);
assertEquals("DFSAdmin should succeed", 0, exitCode);
Mockito.verify(firstHandler).handleRefresh("firstHandler", new String[]{});
// Second handler was never called
Mockito.verify(secondHandler, Mockito.never())
.handleRefresh(Mockito.anyString(), Mockito.any(String[].class));
}
@Test
public void testVariableArgs() throws Exception {
DFSAdmin admin = new DFSAdmin(config);
String[] args = new String[]{"-refresh", "localhost:" +
cluster.getNameNodePort(), "secondHandler", "one"};
int exitCode = admin.run(args);
assertEquals("DFSAdmin should return 2", 2, exitCode);
exitCode = admin.run(new String[]{"-refresh", "localhost:" +
cluster.getNameNodePort(), "secondHandler", "one", "two"});
assertEquals("DFSAdmin should now return 3", 3, exitCode);
Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one"});
Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one", "two"});
}
@Test
public void testUnregistration() throws Exception {
RefreshRegistry.defaultRegistry().unregisterAll("firstHandler");
// And now this should fail
DFSAdmin admin = new DFSAdmin(config);
String[] args = new String[]{"-refresh", "localhost:" +
cluster.getNameNodePort(), "firstHandler"};
int exitCode = admin.run(args);
assertEquals("DFSAdmin should return -1", -1, exitCode);
}
@Test
public void testUnregistrationReturnValue() {
RefreshHandler mockHandler = Mockito.mock(RefreshHandler.class);
RefreshRegistry.defaultRegistry().register("test", mockHandler);
boolean ret = RefreshRegistry.defaultRegistry().unregister("test", mockHandler);
assertTrue(ret);
}
@Test
public void testMultipleRegistration() throws Exception {
RefreshRegistry.defaultRegistry().register("sharedId", firstHandler);
RefreshRegistry.defaultRegistry().register("sharedId", secondHandler);
// this should trigger both
DFSAdmin admin = new DFSAdmin(config);
String[] args = new String[]{"-refresh", "localhost:" +
cluster.getNameNodePort(), "sharedId", "one"};
int exitCode = admin.run(args);
assertEquals(-1, exitCode); // -1 because one of the responses is unregistered
// verify we called both
Mockito.verify(firstHandler).handleRefresh("sharedId", new String[]{"one"});
Mockito.verify(secondHandler).handleRefresh("sharedId", new String[]{"one"});
RefreshRegistry.defaultRegistry().unregisterAll("sharedId");
}
@Test
public void testMultipleReturnCodeMerging() throws Exception {
// Two handlers which return two non-zero values
RefreshHandler handlerOne = Mockito.mock(RefreshHandler.class);
Mockito.stub(handlerOne.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
.toReturn(new RefreshResponse(23, "Twenty Three"));
RefreshHandler handlerTwo = Mockito.mock(RefreshHandler.class);
Mockito.stub(handlerTwo.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
.toReturn(new RefreshResponse(10, "Ten"));
// Then registered to the same ID
RefreshRegistry.defaultRegistry().register("shared", handlerOne);
RefreshRegistry.defaultRegistry().register("shared", handlerTwo);
// We refresh both
DFSAdmin admin = new DFSAdmin(config);
String[] args = new String[]{"-refresh", "localhost:" +
cluster.getNameNodePort(), "shared"};
int exitCode = admin.run(args);
assertEquals(-1, exitCode); // We get -1 because of our logic for melding non-zero return codes
// Verify we called both
Mockito.verify(handlerOne).handleRefresh("shared", new String[]{});
Mockito.verify(handlerTwo).handleRefresh("shared", new String[]{});
RefreshRegistry.defaultRegistry().unregisterAll("shared");
}
@Test
public void testExceptionResultsInNormalError() throws Exception {
// In this test, we ensure that all handlers are called even if we throw an exception in one
RefreshHandler exceptionalHandler = Mockito.mock(RefreshHandler.class);
Mockito.stub(exceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
.toThrow(new RuntimeException("Exceptional Handler Throws Exception"));
RefreshHandler otherExceptionalHandler = Mockito.mock(RefreshHandler.class);
Mockito.stub(otherExceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
.toThrow(new RuntimeException("More Exceptions"));
RefreshRegistry.defaultRegistry().register("exceptional", exceptionalHandler);
RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);
DFSAdmin admin = new DFSAdmin(config);
String[] args = new String[]{"-refresh", "localhost:" +
cluster.getNameNodePort(), "exceptional"};
int exitCode = admin.run(args);
assertEquals(-1, exitCode); // Exceptions result in a -1
Mockito.verify(exceptionalHandler).handleRefresh("exceptional", new String[]{});
Mockito.verify(otherExceptionalHandler).handleRefresh("exceptional", new String[]{});
RefreshRegistry.defaultRegistry().unregisterAll("exceptional");
}
}
| 9,486 | 39.542735 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli;
import org.apache.hadoop.cli.util.CLICommandDFSAdmin;
import org.apache.hadoop.cli.util.CLICommandTypes;
import org.apache.hadoop.cli.util.CLITestCmd;
import org.apache.hadoop.cli.util.CommandExecutor;
import org.apache.hadoop.cli.util.FSCmdExecutor;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
public class CLITestCmdDFS extends CLITestCmd {
public CLITestCmdDFS(String str, CLICommandTypes type) {
super(str, type);
}
@Override
public CommandExecutor getExecutor(String tag) throws IllegalArgumentException {
if (getType() instanceof CLICommandDFSAdmin)
return new FSCmdExecutor(tag, new DFSAdmin());
return super.getExecutor(tag);
}
}
| 1,515 | 37.871795 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli;
import static org.junit.Assert.assertTrue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CLICommandCacheAdmin;
import org.apache.hadoop.cli.util.CLICommandTypes;
import org.apache.hadoop.cli.util.CLITestCmd;
import org.apache.hadoop.cli.util.CacheAdminCmdExecutor;
import org.apache.hadoop.cli.util.CommandExecutor;
import org.apache.hadoop.cli.util.CommandExecutor.Result;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.tools.CacheAdmin;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.xml.sax.SAXException;
public class TestCacheAdminCLI extends CLITestHelper {
public static final Log LOG = LogFactory.getLog(TestCacheAdminCLI.class);
protected MiniDFSCluster dfsCluster = null;
protected FileSystem fs = null;
protected String namenode = null;
@Before
@Override
public void setUp() throws Exception {
super.setUp();
conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
HDFSPolicyProvider.class, PolicyProvider.class);
// Many of the tests expect a replication value of 1 in the output
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
dfsCluster.waitClusterUp();
namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
username = System.getProperty("user.name");
fs = dfsCluster.getFileSystem();
assertTrue("Not a HDFS: "+fs.getUri(),
fs instanceof DistributedFileSystem);
}
@After
@Override
public void tearDown() throws Exception {
if (fs != null) {
fs.close();
}
if (dfsCluster != null) {
dfsCluster.shutdown();
}
Thread.sleep(2000);
super.tearDown();
}
@Override
protected String getTestFile() {
return "testCacheAdminConf.xml";
}
@Override
protected TestConfigFileParser getConfigParser() {
return new TestConfigFileParserCacheAdmin();
}
private class TestConfigFileParserCacheAdmin extends
CLITestHelper.TestConfigFileParser {
@Override
public void endElement(String uri, String localName, String qName)
throws SAXException {
if (qName.equals("cache-admin-command")) {
if (testCommands != null) {
testCommands.add(new CLITestCmdCacheAdmin(charString,
new CLICommandCacheAdmin()));
} else if (cleanupCommands != null) {
cleanupCommands.add(new CLITestCmdCacheAdmin(charString,
new CLICommandCacheAdmin()));
}
} else {
super.endElement(uri, localName, qName);
}
}
}
private class CLITestCmdCacheAdmin extends CLITestCmd {
public CLITestCmdCacheAdmin(String str, CLICommandTypes type) {
super(str, type);
}
@Override
public CommandExecutor getExecutor(String tag)
throws IllegalArgumentException {
if (getType() instanceof CLICommandCacheAdmin) {
return new CacheAdminCmdExecutor(tag, new CacheAdmin(conf));
}
return super.getExecutor(tag);
}
}
@Override
protected Result execute(CLICommand cmd) throws Exception {
return cmd.getExecutor("").executeCommand(cmd.getCmd());
}
@Test
@Override
public void testAll () {
super.testAll();
}
}
| 4,473 | 30.507042 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestHelperDFS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli;
import org.apache.hadoop.cli.util.CLICommandDFSAdmin;
import org.xml.sax.SAXException;
public class CLITestHelperDFS extends CLITestHelper {
@Override
protected TestConfigFileParser getConfigParser() {
return new TestConfigFileParserDFS();
}
class TestConfigFileParserDFS extends CLITestHelper.TestConfigFileParser {
@Override
public void endElement(String uri, String localName, String qName)
throws SAXException {
if (qName.equals("dfs-admin-command")) {
if (testCommands != null) {
testCommands.add(new CLITestCmdDFS(charString,
new CLICommandDFSAdmin()));
} else if (cleanupCommands != null) {
cleanupCommands.add(new CLITestCmdDFS(charString,
new CLICommandDFSAdmin()));
}
} else {
super.endElement(uri, localName, qName);
}
}
}
}
| 1,716 | 34.770833 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CommandExecutor.Result;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestXAttrCLI extends CLITestHelperDFS {
protected MiniDFSCluster dfsCluster = null;
protected FileSystem fs = null;
protected String namenode = null;
@Before
@Override
public void setUp() throws Exception {
super.setUp();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
HDFSPolicyProvider.class, PolicyProvider.class);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
dfsCluster.waitClusterUp();
namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
username = System.getProperty("user.name");
fs = dfsCluster.getFileSystem();
assertTrue("Not a HDFS: "+fs.getUri(),
fs instanceof DistributedFileSystem);
}
@Override
protected String getTestFile() {
return "testXAttrConf.xml";
}
@After
@Override
public void tearDown() throws Exception {
if (fs != null) {
fs.close();
}
if (dfsCluster != null) {
dfsCluster.shutdown();
}
Thread.sleep(2000);
super.tearDown();
}
@Override
protected String expandCommand(final String cmd) {
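    // Substitute the NAMENODE and #LF# placeholders used in the XML test definitions.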
String expCmd = cmd;
expCmd = expCmd.replaceAll("NAMENODE", namenode);
expCmd = expCmd.replaceAll("#LF#",
System.getProperty("line.separator"));
expCmd = super.expandCommand(expCmd);
return expCmd;
}
@Override
protected Result execute(CLICommand cmd) throws Exception {
return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
}
@Test
@Override
public void testAll () {
super.testAll();
}
}
| 3,057 | 29.58 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli;
import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CommandExecutor.Result;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestAclCLI extends CLITestHelperDFS {
private MiniDFSCluster cluster = null;
private FileSystem fs = null;
private String namenode = null;
private String username = null;
@Before
@Override
public void setUp() throws Exception {
super.setUp();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
fs = cluster.getFileSystem();
namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
username = System.getProperty("user.name");
}
@After
@Override
public void tearDown() throws Exception {
super.tearDown();
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
@Override
protected String getTestFile() {
return "testAclCLI.xml";
}
@Override
protected String expandCommand(final String cmd) {
String expCmd = cmd;
expCmd = expCmd.replaceAll("NAMENODE", namenode);
expCmd = expCmd.replaceAll("USERNAME", username);
expCmd = expCmd.replaceAll("#LF#",
System.getProperty("line.separator"));
expCmd = super.expandCommand(expCmd);
return expCmd;
}
@Override
protected Result execute(CLICommand cmd) throws Exception {
return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
}
@Test
@Override
public void testAll() {
super.testAll();
}
}
| 2,572 | 29.270588 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CommandExecutor.Result;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestHDFSCLI extends CLITestHelperDFS {
protected MiniDFSCluster dfsCluster = null;
protected FileSystem fs = null;
protected String namenode = null;
@Before
@Override
public void setUp() throws Exception {
super.setUp();
conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
HDFSPolicyProvider.class, PolicyProvider.class);
// Many of the tests expect a replication value of 1 in the output
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
// Build racks and hosts configuration to test dfsAdmin -printTopology
String [] racks = {"/rack1", "/rack1", "/rack2", "/rack2",
"/rack2", "/rack3", "/rack4", "/rack4" };
String [] hosts = {"host1", "host2", "host3", "host4",
"host5", "host6", "host7", "host8" };
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(8)
.racks(racks)
.hosts(hosts)
.build();
dfsCluster.waitClusterUp();
namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
username = System.getProperty("user.name");
fs = dfsCluster.getFileSystem();
assertTrue("Not a HDFS: "+fs.getUri(),
fs instanceof DistributedFileSystem);
}
@Override
protected String getTestFile() {
return "testHDFSConf.xml";
}
@After
@Override
public void tearDown() throws Exception {
if (fs != null) {
fs.close();
}
if (dfsCluster != null) {
dfsCluster.shutdown();
}
Thread.sleep(2000);
super.tearDown();
}
@Override
protected String expandCommand(final String cmd) {
String expCmd = cmd;
expCmd = expCmd.replaceAll("NAMENODE", namenode);
expCmd = super.expandCommand(expCmd);
return expCmd;
}
@Override
protected Result execute(CLICommand cmd) throws Exception {
return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
}
@Test
@Override
public void testAll () {
super.testAll();
}
}
| 3,477 | 31.504673 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli;
import java.io.File;
import java.io.IOException;
import java.security.NoSuchAlgorithmException;
import java.util.UUID;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CLICommandCryptoAdmin;
import org.apache.hadoop.cli.util.CLICommandTypes;
import org.apache.hadoop.cli.util.CLITestCmd;
import org.apache.hadoop.cli.util.CryptoAdminCmdExecutor;
import org.apache.hadoop.cli.util.CommandExecutor;
import org.apache.hadoop.cli.util.CommandExecutor.Result;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.tools.CryptoAdmin;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.xml.sax.SAXException;
public class TestCryptoAdminCLI extends CLITestHelperDFS {
protected MiniDFSCluster dfsCluster = null;
protected FileSystem fs = null;
protected String namenode = null;
private static File tmpDir;
@Before
@Override
public void setUp() throws Exception {
super.setUp();
conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
HDFSPolicyProvider.class, PolicyProvider.class);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
tmpDir = new File(System.getProperty("test.build.data", "target"),
UUID.randomUUID().toString()).getAbsoluteFile();
final Path jksPath = new Path(tmpDir.toString(), "test.jks");
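    // Back the cluster's encryption keys with a file-based JavaKeyStoreProvider
    // rooted in the temporary directory created above.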
conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
dfsCluster.waitClusterUp();
createAKey("mykey", conf);
namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
username = System.getProperty("user.name");
fs = dfsCluster.getFileSystem();
assertTrue("Not an HDFS: " + fs.getUri(),
fs instanceof DistributedFileSystem);
}
@After
@Override
public void tearDown() throws Exception {
if (fs != null) {
fs.close();
}
if (dfsCluster != null) {
dfsCluster.shutdown();
}
Thread.sleep(2000);
super.tearDown();
}
/* Helper function to create a key in the Key Provider. */
private void createAKey(String keyName, Configuration conf)
throws NoSuchAlgorithmException, IOException {
final KeyProvider provider =
dfsCluster.getNameNode().getNamesystem().getProvider();
final KeyProvider.Options options = KeyProvider.options(conf);
provider.createKey(keyName, options);
provider.flush();
}
@Override
protected String getTestFile() {
return "testCryptoConf.xml";
}
@Override
protected String expandCommand(final String cmd) {
String expCmd = cmd;
expCmd = expCmd.replaceAll("NAMENODE", namenode);
expCmd = expCmd.replaceAll("#LF#",
System.getProperty("line.separator"));
expCmd = super.expandCommand(expCmd);
return expCmd;
}
@Override
protected TestConfigFileParser getConfigParser() {
return new TestConfigFileParserCryptoAdmin();
}
private class TestConfigFileParserCryptoAdmin extends
CLITestHelper.TestConfigFileParser {
@Override
public void endElement(String uri, String localName, String qName)
throws SAXException {
if (qName.equals("crypto-admin-command")) {
if (testCommands != null) {
testCommands.add(new CLITestCmdCryptoAdmin(charString,
new CLICommandCryptoAdmin()));
} else if (cleanupCommands != null) {
cleanupCommands.add(new CLITestCmdCryptoAdmin(charString,
new CLICommandCryptoAdmin()));
}
} else {
super.endElement(uri, localName, qName);
}
}
}
private class CLITestCmdCryptoAdmin extends CLITestCmd {
public CLITestCmdCryptoAdmin(String str, CLICommandTypes type) {
super(str, type);
}
@Override
public CommandExecutor getExecutor(String tag)
throws IllegalArgumentException {
if (getType() instanceof CLICommandCryptoAdmin) {
return new CryptoAdminCmdExecutor(tag, new CryptoAdmin(conf));
}
return super.getExecutor(tag);
}
}
@Override
protected Result execute(CLICommand cmd) throws Exception {
return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
}
@Test
@Override
public void testAll () {
super.testAll();
}
}
| 5,710 | 32.203488 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCacheAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
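/** Marker type identifying CacheAdmin commands in the CLI test framework. */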
public class CLICommandCacheAdmin implements CLICommandTypes {
}
| 915 | 40.636364 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CacheAdminCmdExecutor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
import org.apache.hadoop.hdfs.tools.CacheAdmin;
import org.apache.hadoop.util.ToolRunner;
public class CacheAdminCmdExecutor extends CommandExecutor {
protected String namenode = null;
protected CacheAdmin admin = null;
public CacheAdminCmdExecutor(String namenode, CacheAdmin admin) {
this.namenode = namenode;
this.admin = admin;
}
@Override
protected void execute(final String cmd) throws Exception {
String[] args = getCommandAsArgs(cmd, "NAMENODE", this.namenode);
ToolRunner.run(admin, args);
}
}
| 1,379 | 35.315789 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandDFSAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
public class CLICommandDFSAdmin implements CLICommandTypes {
}
| 913 | 40.545455 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CryptoAdminCmdExecutor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
import org.apache.hadoop.hdfs.tools.CryptoAdmin;
import org.apache.hadoop.util.ToolRunner;
public class CryptoAdminCmdExecutor extends CommandExecutor {
protected String namenode = null;
protected CryptoAdmin admin = null;
public CryptoAdminCmdExecutor(String namenode, CryptoAdmin admin) {
this.namenode = namenode;
this.admin = admin;
}
@Override
protected void execute(final String cmd) throws Exception {
String[] args = getCommandAsArgs(cmd, "NAMENODE", this.namenode);
ToolRunner.run(admin, args);
}
}
| 1,384 | 35.447368 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCryptoAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
public class CLICommandCryptoAdmin implements CLICommandTypes {
}
| 916 | 40.681818 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Iterator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Before;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import static org.mockito.Matchers.*;
public class TestDelegationTokenFetcher {
private DistributedFileSystem dfs;
private Configuration conf;
private URI uri;
private static final String SERVICE_VALUE = "localhost:2005";
private static final String tokenFile = "file.dta";
@Before
public void init() throws URISyntaxException, IOException {
dfs = mock(DistributedFileSystem.class);
conf = new Configuration();
uri = new URI("hdfs://" + SERVICE_VALUE);
FileSystemTestHelper.addFileSystemForTesting(uri, conf, dfs);
}
/**
* Verify that when the DelegationTokenFetcher runs, it talks to the Namenode,
* pulls out the correct user's token and successfully serializes it to disk.
*/
@Test
public void expectedTokenIsRetrievedFromDFS() throws Exception {
final byte[] ident = new DelegationTokenIdentifier(new Text("owner"),
new Text("renewer"), new Text("realuser")).getBytes();
final byte[] pw = new byte[] { 42 };
final Text service = new Text(uri.toString());
// Create a token for the fetcher to fetch, wire NN to return it when asked
// for this particular user.
final Token<DelegationTokenIdentifier> t =
new Token<DelegationTokenIdentifier>(ident, pw, FakeRenewer.KIND, service);
when(dfs.addDelegationTokens(eq((String) null), any(Credentials.class))).thenAnswer(
new Answer<Token<?>[]>() {
@Override
public Token<?>[] answer(InvocationOnMock invocation) {
Credentials creds = (Credentials)invocation.getArguments()[1];
creds.addToken(service, t);
return new Token<?>[]{t};
}
});
when(dfs.getUri()).thenReturn(uri);
FakeRenewer.reset();
FileSystem fileSys = FileSystem.getLocal(conf);
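    // Fetch the token into a local file, then exercise --print, --renew and
    // --cancel against that file.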
try {
DelegationTokenFetcher.main(new String[] { "-fs", uri.toString(),
tokenFile });
Path p = new Path(fileSys.getWorkingDirectory(), tokenFile);
Credentials creds = Credentials.readTokenStorageFile(p, conf);
Iterator<Token<?>> itr = creds.getAllTokens().iterator();
// make sure we got back exactly the 1 token we expected
assertTrue(itr.hasNext());
assertEquals(t, itr.next());
assertTrue(!itr.hasNext());
DelegationTokenFetcher.main(new String[] { "--print", tokenFile });
DelegationTokenFetcher.main(new String[] { "--renew", tokenFile });
assertEquals(t, FakeRenewer.lastRenewed);
FakeRenewer.reset();
DelegationTokenFetcher.main(new String[] { "--cancel", tokenFile });
assertEquals(t, FakeRenewer.lastCanceled);
} finally {
fileSys.delete(new Path(tokenFile), true);
}
}
@Test
public void testDelegationTokenWithoutRenewer() throws Exception {
conf.setBoolean(DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.build();
FileSystem localFs = FileSystem.getLocal(conf);
try {
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
// Should be able to fetch token without renewer.
uri = fs.getUri();
DelegationTokenFetcher.main(new String[] { "-fs", uri.toString(),
tokenFile });
Path p = new Path(localFs.getWorkingDirectory(), tokenFile);
Credentials creds = Credentials.readTokenStorageFile(p, conf);
Iterator<Token<?>> itr = creds.getAllTokens().iterator();
// make sure we got back exactly the 1 token we expected
assertTrue(itr.hasNext());
assertNotNull("Token without renewer shouldn't be null", itr.next());
assertTrue(!itr.hasNext());
try {
// Without renewer renewal of token should fail.
DelegationTokenFetcher.main(new String[] { "--renew", tokenFile });
fail("Should have failed to renew");
} catch (AccessControlException e) {
GenericTestUtils.assertExceptionContains(
"tried to renew a token without a renewer", e);
}
} finally {
cluster.shutdown();
localFs.delete(new Path(tokenFile), true);
}
}
}
| 6,226 | 39.967105 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.util.HashSet;
import org.apache.hadoop.conf.TestConfigurationFieldsBase;
import org.apache.hadoop.hdfs.DFSConfigKeys;
/**
* Unit test class to compare the following MR Configuration classes:
* <p></p>
* {@link org.apache.hadoop.hdfs.DFSConfigKeys}
* <p></p>
* against hdfs-default.xml for missing properties. Currently only
* throws an error if the class is missing a property.
* <p></p>
* Refer to {@link org.apache.hadoop.conf.TestConfigurationFieldsBase}
* for how this class works.
*/
public class TestHdfsConfigFields extends TestConfigurationFieldsBase {
@Override
public void initializeMemberVariables() {
    xmlFilename = "hdfs-default.xml";
configurationClasses = new Class[] { DFSConfigKeys.class };
// Set error modes
errorIfMissingConfigProps = true;
errorIfMissingXmlProps = false;
// Allocate
xmlPropsToSkipCompare = new HashSet<String>();
xmlPrefixToSkipCompare = new HashSet<String>();
// Used in native code fuse_connect.c
xmlPropsToSkipCompare.add("hadoop.fuse.timer.period");
xmlPropsToSkipCompare.add("hadoop.fuse.connection.timeout");
// Used dynamically as part of DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX
xmlPropsToSkipCompare.add("dfs.namenode.edits.journal-plugin.qjournal");
// Example (not real) property in hdfs-default.xml
xmlPropsToSkipCompare.add("dfs.ha.namenodes.EXAMPLENAMESERVICE");
// Defined in org.apache.hadoop.fs.CommonConfigurationKeys
xmlPropsToSkipCompare.add("hadoop.user.group.metrics.percentiles.intervals");
// Used oddly by DataNode to create new config String
xmlPropsToSkipCompare.add("hadoop.hdfs.configuration.version");
// Kept in the NfsConfiguration class in the hadoop-hdfs-nfs module
xmlPrefixToSkipCompare.add("nfs");
// Not a hardcoded property. Used by SaslRpcClient
xmlPrefixToSkipCompare.add("dfs.namenode.kerberos.principal.pattern");
// Skip comparing in branch-2. Removed in trunk with HDFS-7985.
xmlPropsToSkipCompare.add("dfs.webhdfs.enabled");
// Some properties have moved to HdfsClientConfigKeys
xmlPropsToSkipCompare.add("dfs.client.short.circuit.replica.stale.threshold.ms");
// Ignore SpanReceiveHost properties
xmlPropsToSkipCompare.add("dfs.htrace.spanreceiver.classes");
xmlPropsToSkipCompare.add("dfs.client.htrace.spanreceiver.classes");
}
}
| 3,243 | 37.164706 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java
|
package org.apache.hadoop.tools;
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import static org.junit.Assert.*;
import java.io.ByteArrayOutputStream;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.io.PrintStream;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
import org.apache.hadoop.hdfs.tools.JMXGet;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.util.ExitUtil.ExitException;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.common.collect.ImmutableSet;
import com.google.common.io.ByteStreams;
public class TestTools {
private static final int PIPE_BUFFER_SIZE = 1024 * 5;
private final static String INVALID_OPTION = "-invalidOption";
private static final String[] OPTIONS = new String[2];
@BeforeClass
public static void before() {
ExitUtil.disableSystemExit();
OPTIONS[1] = INVALID_OPTION;
}
@Test
public void testDelegationTokenFetcherPrintUsage() {
String pattern = "Options:";
checkOutput(new String[] { "-help" }, pattern, System.out,
DelegationTokenFetcher.class);
}
@Test
public void testDelegationTokenFetcherErrorOption() {
String pattern = "ERROR: Only specify cancel, renew or print.";
checkOutput(new String[] { "-cancel", "-renew" }, pattern, System.err,
DelegationTokenFetcher.class);
}
@Test
public void testJMXToolHelp() {
String pattern = "usage: jmxget options are:";
checkOutput(new String[] { "-help" }, pattern, System.out, JMXGet.class);
}
@Test
public void testJMXToolAdditionParameter() {
String pattern = "key = -addition";
checkOutput(new String[] { "-service=NameNode", "-server=localhost",
"-addition" }, pattern, System.err, JMXGet.class);
}
@Test
public void testDFSAdminInvalidUsageHelp() {
ImmutableSet<String> args = ImmutableSet.of("-report", "-saveNamespace",
"-rollEdits", "-restoreFailedStorage", "-refreshNodes",
"-finalizeUpgrade", "-metasave", "-refreshUserToGroupsMappings",
"-printTopology", "-refreshNamenodes", "-deleteBlockPool",
"-setBalancerBandwidth", "-fetchImage");
try {
for (String arg : args)
assertTrue(ToolRunner.run(new DFSAdmin(), fillArgs(arg)) == -1);
assertTrue(ToolRunner.run(new DFSAdmin(),
new String[] { "-help", "-some" }) == 0);
} catch (Exception e) {
fail("testDFSAdminHelp error" + e);
}
String pattern = "Usage: hdfs dfsadmin";
checkOutput(new String[] { "-cancel", "-renew" }, pattern, System.err,
DFSAdmin.class);
}
private static String[] fillArgs(String arg) {
OPTIONS[0] = arg;
return OPTIONS;
}
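  /**
   * Runs the given tool class with {@code args}, capturing whatever it writes
   * to the selected stream (System.out or System.err) through a piped stream,
   * and asserts that the captured output contains {@code pattern}.
   */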
private void checkOutput(String[] args, String pattern, PrintStream out,
Class<?> clazz) {
ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
try {
PipedOutputStream pipeOut = new PipedOutputStream();
PipedInputStream pipeIn = new PipedInputStream(pipeOut, PIPE_BUFFER_SIZE);
if (out == System.out) {
System.setOut(new PrintStream(pipeOut));
} else if (out == System.err) {
System.setErr(new PrintStream(pipeOut));
}
if (clazz == DelegationTokenFetcher.class) {
expectDelegationTokenFetcherExit(args);
} else if (clazz == JMXGet.class) {
expectJMXGetExit(args);
} else if (clazz == DFSAdmin.class) {
expectDfsAdminPrint(args);
}
pipeOut.close();
ByteStreams.copy(pipeIn, outBytes);
pipeIn.close();
assertTrue(new String(outBytes.toByteArray()).contains(pattern));
} catch (Exception ex) {
fail("checkOutput error " + ex);
}
}
private void expectDfsAdminPrint(String[] args) {
try {
ToolRunner.run(new DFSAdmin(), args);
} catch (Exception ex) {
fail("expectDelegationTokenFetcherExit ex error " + ex);
}
}
private static void expectDelegationTokenFetcherExit(String[] args) {
try {
DelegationTokenFetcher.main(args);
fail("should call exit");
} catch (ExitException e) {
ExitUtil.resetFirstExitException();
} catch (Exception ex) {
fail("expectDelegationTokenFetcherExit ex error " + ex);
}
}
private static void expectJMXGetExit(String[] args) {
try {
JMXGet.main(args);
fail("should call exit");
} catch (ExitException e) {
ExitUtil.resetFirstExitException();
} catch (Exception ex) {
fail("expectJMXGetExit ex error " + ex);
}
}
}
| 5,399 | 32.540373 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.io.PrintStream;
import java.lang.management.ManagementFactory;
import java.util.Random;
import java.util.Set;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.tools.JMXGet;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
 * Tests JMX access to NameNode and DataNode metrics using the JMXGet tool.
 */
public class TestJMXGet {
private Configuration config;
private MiniDFSCluster cluster;
static final long seed = 0xAAAAEEFL;
static final int blockSize = 4096;
static final int fileSize = 8192;
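  /**
   * Writes {@code fileSize} random bytes (seeded for reproducibility) to the
   * given path with the requested replication factor.
   */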
private void writeFile(FileSystem fileSys, Path name, int repl)
throws IOException {
FSDataOutputStream stm = fileSys.create(name, true,
fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short)repl, blockSize);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
stm.close();
}
@Before
public void setUp() throws Exception {
config = new HdfsConfiguration();
}
/**
* clean up
*/
@After
public void tearDown() throws Exception {
    if (cluster != null) {
      if (cluster.isClusterUp()) {
        cluster.shutdown();
      }
      File data_dir = new File(cluster.getDataDirectory());
      if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
        throw new IOException("Could not delete hdfs directory in tearDown '"
            + data_dir + "'");
      }
    }
}
  /**
   * Test JMX connection to the NameNode.
   * @throws Exception
   */
@Test
public void testNameNode() throws Exception {
int numDatanodes = 2;
cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
cluster.waitActive();
writeFile(cluster.getFileSystem(), new Path("/test1"), 2);
JMXGet jmx = new JMXGet();
String serviceName = "NameNode";
jmx.setService(serviceName);
jmx.init(); // default lists namenode mbeans only
assertTrue("error printAllValues", checkPrintAllValues(jmx));
//get some data from different source
assertEquals(numDatanodes, Integer.parseInt(
jmx.getValue("NumLiveDataNodes")));
assertGauge("CorruptBlocks", Long.parseLong(jmx.getValue("CorruptBlocks")),
getMetrics("FSNamesystem"));
assertEquals(numDatanodes, Integer.parseInt(
jmx.getValue("NumOpenConnections")));
cluster.shutdown();
MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();
ObjectName query = new ObjectName("Hadoop:service=" + serviceName + ",*");
Set<ObjectName> names = mbsc.queryNames(query, null);
assertTrue("No beans should be registered for " + serviceName, names.isEmpty());
}
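  /**
   * Redirects System.err into a piped stream, calls printAllValues(), and
   * returns whether the captured output lists the available JMX keys.
   */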
private static boolean checkPrintAllValues(JMXGet jmx) throws Exception {
int size = 0;
byte[] bytes = null;
String pattern = "List of all the available keys:";
PipedOutputStream pipeOut = new PipedOutputStream();
PipedInputStream pipeIn = new PipedInputStream(pipeOut);
System.setErr(new PrintStream(pipeOut));
jmx.printAllValues();
if ((size = pipeIn.available()) != 0) {
bytes = new byte[size];
pipeIn.read(bytes, 0, bytes.length);
}
pipeOut.close();
pipeIn.close();
return bytes != null ? new String(bytes).contains(pattern) : false;
}
  /**
   * Test JMX connection to the DataNode.
   * @throws Exception
   */
@Test
public void testDataNode() throws Exception {
int numDatanodes = 2;
cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
cluster.waitActive();
writeFile(cluster.getFileSystem(), new Path("/test"), 2);
JMXGet jmx = new JMXGet();
String serviceName = "DataNode";
jmx.setService(serviceName);
jmx.init();
assertEquals(fileSize, Integer.parseInt(jmx.getValue("BytesWritten")));
cluster.shutdown();
MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();
ObjectName query = new ObjectName("Hadoop:service=" + serviceName + ",*");
Set<ObjectName> names = mbsc.queryNames(query, null);
assertTrue("No beans should be registered for " + serviceName, names.isEmpty());
}
}
| 5,705 | 31.982659 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenRenewer;
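/**
 * A minimal no-op TokenRenewer used by the token fetcher tests. TokenRenewer
 * implementations are discovered through Java's ServiceLoader, so it is
 * assumed that a test resource such as
 * META-INF/services/org.apache.hadoop.security.token.TokenRenewer (not shown
 * here) lists this class.
 */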
public class FakeRenewer extends TokenRenewer {
static Token<?> lastRenewed = null;
static Token<?> lastCanceled = null;
static final Text KIND = new Text("TESTING-TOKEN-KIND");
@Override
public boolean handleKind(Text kind) {
return FakeRenewer.KIND.equals(kind);
}
@Override
public boolean isManaged(Token<?> token) throws IOException {
return true;
}
@Override
public long renew(Token<?> token, Configuration conf) {
lastRenewed = token;
return 0;
}
@Override
public void cancel(Token<?> token, Configuration conf) {
lastCanceled = token;
}
public static void reset() {
lastRenewed = null;
lastCanceled = null;
}
}
| 1,720 | 28.672414 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import static org.jboss.netty.handler.codec.http.HttpMethod.GET;
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK;
import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.Charset;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.Executors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.token.Token;
import org.apache.log4j.Logger;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelFutureListener;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.ExceptionEvent;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
import org.jboss.netty.handler.codec.http.HttpChunkAggregator;
import org.jboss.netty.handler.codec.http.HttpHeaders;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.jboss.netty.handler.codec.http.HttpRequest;
import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
import org.jboss.netty.handler.codec.http.HttpResponse;
import org.jboss.netty.handler.codec.http.HttpResponseEncoder;
import org.jboss.netty.handler.codec.http.HttpResponseStatus;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.UnmodifiableIterator;
public class TestDelegationTokenRemoteFetcher {
private static final Logger LOG = Logger
.getLogger(TestDelegationTokenRemoteFetcher.class);
private static final String EXP_DATE = "124123512361236";
private static final String tokenFile = "http.file.dta";
private static final URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY;
private int httpPort;
private URI serviceUrl;
private FileSystem fileSys;
private Configuration conf;
private ServerBootstrap bootstrap;
private Token<DelegationTokenIdentifier> testToken;
private volatile AssertionError assertionError;
@Before
public void init() throws Exception {
conf = new Configuration();
fileSys = FileSystem.getLocal(conf);
httpPort = NetUtils.getFreeSocketPort();
serviceUrl = new URI("http://localhost:" + httpPort);
testToken = createToken(serviceUrl);
}
@After
public void clean() throws IOException {
if (fileSys != null)
fileSys.delete(new Path(tokenFile), true);
if (bootstrap != null)
bootstrap.releaseExternalResources();
}
  /**
   * Try to fetch a token when no HTTP server is running; expect an IOException.
   */
@Test
public void testTokenFetchFail() throws Exception {
try {
DelegationTokenFetcher.main(new String[] { "-webservice=" + serviceUrl,
tokenFile });
fail("Token fetcher shouldn't start in absense of NN");
} catch (IOException ex) {
}
}
  /**
   * Try to renew a token when no HTTP server is running; expect an IOException.
   */
@Test
public void testTokenRenewFail() throws AuthenticationException {
try {
DelegationTokenFetcher.renewDelegationToken(connectionFactory, serviceUrl, testToken);
fail("Token fetcher shouldn't be able to renew tokens in absense of NN");
} catch (IOException ex) {
}
}
  /**
   * Try to cancel a token when no HTTP server is running; expect an IOException.
   */
@Test
public void expectedTokenCancelFail() throws AuthenticationException {
try {
DelegationTokenFetcher.cancelDelegationToken(connectionFactory, serviceUrl, testToken);
fail("Token fetcher shouldn't be able to cancel tokens in absense of NN");
} catch (IOException ex) {
}
}
  /**
   * Try to renew a token against an invalid URL and get an HTTP error response.
   */
@Test
public void expectedTokenRenewErrorHttpResponse()
throws AuthenticationException, URISyntaxException {
bootstrap = startHttpServer(httpPort, testToken, serviceUrl);
try {
DelegationTokenFetcher.renewDelegationToken(connectionFactory, new URI(
serviceUrl.toString() + "/exception"), createToken(serviceUrl));
fail("Token fetcher shouldn't be able to renew tokens using an invalid"
+ " NN URL");
} catch (IOException ex) {
}
if (assertionError != null)
throw assertionError;
}
  /**
   * Cancel a token through the HTTP server.
   */
@Test
public void testCancelTokenFromHttp() throws IOException,
AuthenticationException {
bootstrap = startHttpServer(httpPort, testToken, serviceUrl);
DelegationTokenFetcher.cancelDelegationToken(connectionFactory, serviceUrl,
testToken);
if (assertionError != null)
throw assertionError;
}
  /**
   * Renew a token through the HTTP server and verify the returned expiration time.
   */
@Test
public void testRenewTokenFromHttp() throws IOException,
NumberFormatException, AuthenticationException {
bootstrap = startHttpServer(httpPort, testToken, serviceUrl);
assertTrue("testRenewTokenFromHttp error",
Long.parseLong(EXP_DATE) == DelegationTokenFetcher.renewDelegationToken(
connectionFactory, serviceUrl, testToken));
if (assertionError != null)
throw assertionError;
}
  /**
   * Fetch a token through the HTTP server and verify its identifier and password.
   */
@Test
public void expectedTokenIsRetrievedFromHttp() throws Exception {
bootstrap = startHttpServer(httpPort, testToken, serviceUrl);
DelegationTokenFetcher.main(new String[] { "-webservice=" + serviceUrl,
tokenFile });
Path p = new Path(fileSys.getWorkingDirectory(), tokenFile);
Credentials creds = Credentials.readTokenStorageFile(p, conf);
Iterator<Token<?>> itr = creds.getAllTokens().iterator();
assertTrue("token not exist error", itr.hasNext());
Token<?> fetchedToken = itr.next();
Assert.assertArrayEquals("token wrong identifier error",
testToken.getIdentifier(), fetchedToken.getIdentifier());
Assert.assertArrayEquals("token wrong password error",
testToken.getPassword(), fetchedToken.getPassword());
if (assertionError != null)
throw assertionError;
}
private static Token<DelegationTokenIdentifier> createToken(URI serviceUri) {
byte[] pw = "hadoop".getBytes();
byte[] ident = new DelegationTokenIdentifier(new Text("owner"), new Text(
"renewer"), new Text("realuser")).getBytes();
Text service = new Text(serviceUri.toString());
return new Token<DelegationTokenIdentifier>(ident, pw,
WebHdfsConstants.HFTP_TOKEN_KIND, service);
}
private interface Handler {
void handle(Channel channel, Token<DelegationTokenIdentifier> token,
String serviceUrl) throws IOException;
}
private class FetchHandler implements Handler {
@Override
public void handle(Channel channel, Token<DelegationTokenIdentifier> token,
String serviceUrl) throws IOException {
Assert.assertEquals(testToken, token);
Credentials creds = new Credentials();
creds.addToken(new Text(serviceUrl), token);
DataOutputBuffer out = new DataOutputBuffer();
creds.write(out);
int fileLength = out.getData().length;
ChannelBuffer cbuffer = ChannelBuffers.buffer(fileLength);
cbuffer.writeBytes(out.getData());
HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
response.setHeader(HttpHeaders.Names.CONTENT_LENGTH,
String.valueOf(fileLength));
response.setContent(cbuffer);
channel.write(response).addListener(ChannelFutureListener.CLOSE);
}
}
private class RenewHandler implements Handler {
@Override
public void handle(Channel channel, Token<DelegationTokenIdentifier> token,
String serviceUrl) throws IOException {
Assert.assertEquals(testToken, token);
byte[] bytes = EXP_DATE.getBytes();
ChannelBuffer cbuffer = ChannelBuffers.buffer(bytes.length);
cbuffer.writeBytes(bytes);
HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
response.setHeader(HttpHeaders.Names.CONTENT_LENGTH,
String.valueOf(bytes.length));
response.setContent(cbuffer);
channel.write(response).addListener(ChannelFutureListener.CLOSE);
}
}
private class ExceptionHandler implements Handler {
@Override
public void handle(Channel channel, Token<DelegationTokenIdentifier> token,
String serviceUrl) throws IOException {
Assert.assertEquals(testToken, token);
HttpResponse response = new DefaultHttpResponse(HTTP_1_1,
HttpResponseStatus.METHOD_NOT_ALLOWED);
channel.write(response).addListener(ChannelFutureListener.CLOSE);
}
}
private class CancelHandler implements Handler {
@Override
public void handle(Channel channel, Token<DelegationTokenIdentifier> token,
String serviceUrl) throws IOException {
Assert.assertEquals(testToken, token);
HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
channel.write(response).addListener(ChannelFutureListener.CLOSE);
}
}
private final class CredentialsLogicHandler extends
SimpleChannelUpstreamHandler {
private final Token<DelegationTokenIdentifier> token;
private final String serviceUrl;
private final ImmutableMap<String, Handler> routes = ImmutableMap.of(
"/exception", new ExceptionHandler(),
"/cancelDelegationToken", new CancelHandler(),
"/getDelegationToken", new FetchHandler() ,
"/renewDelegationToken", new RenewHandler());
public CredentialsLogicHandler(Token<DelegationTokenIdentifier> token,
String serviceUrl) {
this.token = token;
this.serviceUrl = serviceUrl;
}
@Override
public void messageReceived(ChannelHandlerContext ctx, final MessageEvent e)
throws Exception {
HttpRequest request = (HttpRequest) e.getMessage();
if (request.getMethod() == HttpMethod.OPTIONS) {
// Mimic SPNEGO authentication
HttpResponse response = new DefaultHttpResponse(HTTP_1_1,
HttpResponseStatus.OK);
response.addHeader("Set-Cookie", "hadoop-auth=1234");
e.getChannel().write(response).addListener(ChannelFutureListener.CLOSE);
} else if (request.getMethod() != GET) {
e.getChannel().close();
}
UnmodifiableIterator<Map.Entry<String, Handler>> iter = routes.entrySet()
.iterator();
while (iter.hasNext()) {
Map.Entry<String, Handler> entry = iter.next();
if (request.getUri().contains(entry.getKey())) {
Handler handler = entry.getValue();
try {
handler.handle(e.getChannel(), token, serviceUrl);
} catch (AssertionError ee) {
TestDelegationTokenRemoteFetcher.this.assertionError = ee;
HttpResponse response = new DefaultHttpResponse(HTTP_1_1,
HttpResponseStatus.BAD_REQUEST);
response.setContent(ChannelBuffers.copiedBuffer(ee.getMessage(),
Charset.defaultCharset()));
e.getChannel().write(response).addListener(ChannelFutureListener.CLOSE);
}
return;
}
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e)
throws Exception {
Channel ch = e.getChannel();
Throwable cause = e.getCause();
if (LOG.isDebugEnabled())
LOG.debug(cause.getMessage());
ch.close().addListener(ChannelFutureListener.CLOSE);
}
}
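  /**
   * Starts a minimal Netty 3 HTTP server on the given port. The pipeline
   * decodes requests, aggregates chunks, encodes responses, and delegates to a
   * CredentialsLogicHandler that mimics the NameNode's delegation token
   * endpoints for this test.
   */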
private ServerBootstrap startHttpServer(int port,
final Token<DelegationTokenIdentifier> token, final URI url) {
ServerBootstrap bootstrap = new ServerBootstrap(
new NioServerSocketChannelFactory(Executors.newCachedThreadPool(),
Executors.newCachedThreadPool()));
bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
@Override
public ChannelPipeline getPipeline() throws Exception {
return Channels.pipeline(new HttpRequestDecoder(),
new HttpChunkAggregator(65536), new HttpResponseEncoder(),
new CredentialsLogicHandler(token, url.toString()));
}
});
bootstrap.bind(new InetSocketAddress("localhost", port));
return bootstrap;
}
}
| 14,217 | 36.914667 | 119 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
/** Unit tests for permission */
public class TestPermission {
public static final Log LOG = LogFactory.getLog(TestPermission.class);
final private static Path ROOT_PATH = new Path("/data");
final private static Path CHILD_DIR1 = new Path(ROOT_PATH, "child1");
final private static Path CHILD_DIR2 = new Path(ROOT_PATH, "child2");
final private static Path CHILD_FILE1 = new Path(ROOT_PATH, "file1");
final private static Path CHILD_FILE2 = new Path(ROOT_PATH, "file2");
final private static int FILE_LEN = 100;
final private static Random RAN = new Random();
final private static String USER_NAME = "user" + RAN.nextInt();
final private static String[] GROUP_NAMES = {"group1", "group2"};
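  /**
   * Logs the file status of {@code path} and, if {@code expected} is non-null,
   * asserts that the actual permission matches it; returns the actual
   * permission.
   */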
static FsPermission checkPermission(FileSystem fs,
String path, FsPermission expected) throws IOException {
FileStatus s = fs.getFileStatus(new Path(path));
LOG.info(s.getPath() + ": " + s.isDirectory() + " " + s.getPermission()
+ ":" + s.getOwner() + ":" + s.getGroup());
if (expected != null) {
assertEquals(expected, s.getPermission());
assertEquals(expected.toShort(), s.getPermission().toShort());
}
return s.getPermission();
}
/**
* Tests backward compatibility. Configuration can be
* either set with old param dfs.umask that takes decimal umasks
* or dfs.umaskmode that takes symbolic or octal umask.
*/
@Test
public void testBackwardCompatibility() {
// Test 1 - old configuration key with decimal
// umask value should be handled when set using
// FSPermission.setUMask() API
FsPermission perm = new FsPermission((short)18);
Configuration conf = new Configuration();
FsPermission.setUMask(conf, perm);
assertEquals(18, FsPermission.getUMask(conf).toShort());
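    // Note added for clarity: decimal 18 and octal 022 are the same umask bit
    // pattern, which is why the valid cases in this test all expect
    // toShort() == 18.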
// Test 2 - old configuration key set with decimal
// umask value should be handled
perm = new FsPermission((short)18);
conf = new Configuration();
conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "18");
assertEquals(18, FsPermission.getUMask(conf).toShort());
// Test 3 - old configuration key overrides the new one
conf = new Configuration();
conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "18");
conf.set(FsPermission.UMASK_LABEL, "000");
assertEquals(18, FsPermission.getUMask(conf).toShort());
// Test 4 - new configuration key is handled
conf = new Configuration();
conf.set(FsPermission.UMASK_LABEL, "022");
assertEquals(18, FsPermission.getUMask(conf).toShort());
// Test 5 - equivalent valid umask
conf = new Configuration();
conf.set(FsPermission.UMASK_LABEL, "0022");
assertEquals(18, FsPermission.getUMask(conf).toShort());
// Test 6 - invalid umask
conf = new Configuration();
conf.set(FsPermission.UMASK_LABEL, "1222");
try {
FsPermission.getUMask(conf);
fail("expect IllegalArgumentException happen");
} catch (IllegalArgumentException e) {
//pass, exception successfully trigger
}
// Test 7 - invalid umask
conf = new Configuration();
conf.set(FsPermission.UMASK_LABEL, "01222");
try {
FsPermission.getUMask(conf);
fail("expect IllegalArgumentException happen");
} catch (IllegalArgumentException e) {
//pass, exception successfully trigger
}
}
@Test
public void testCreate() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
conf.set(FsPermission.UMASK_LABEL, "000");
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
fs = FileSystem.get(conf);
FsPermission rootPerm = checkPermission(fs, "/", null);
FsPermission inheritPerm = FsPermission.createImmutable(
(short)(rootPerm.toShort() | 0300));
FsPermission dirPerm = new FsPermission((short)0777);
fs.mkdirs(new Path("/a1/a2/a3"), dirPerm);
checkPermission(fs, "/a1", dirPerm);
checkPermission(fs, "/a1/a2", dirPerm);
checkPermission(fs, "/a1/a2/a3", dirPerm);
dirPerm = new FsPermission((short)0123);
FsPermission permission = FsPermission.createImmutable(
(short)(dirPerm.toShort() | 0300));
fs.mkdirs(new Path("/aa/1/aa/2/aa/3"), dirPerm);
checkPermission(fs, "/aa/1", permission);
checkPermission(fs, "/aa/1/aa/2", permission);
checkPermission(fs, "/aa/1/aa/2/aa/3", dirPerm);
FsPermission filePerm = new FsPermission((short)0444);
Path p = new Path("/b1/b2/b3.txt");
FSDataOutputStream out = fs.create(p, filePerm,
true, conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
fs.getDefaultReplication(p), fs.getDefaultBlockSize(p), null);
out.write(123);
out.close();
checkPermission(fs, "/b1", inheritPerm);
checkPermission(fs, "/b1/b2", inheritPerm);
checkPermission(fs, "/b1/b2/b3.txt", filePerm);
conf.set(FsPermission.UMASK_LABEL, "022");
permission =
FsPermission.createImmutable((short)0666);
FileSystem.mkdirs(fs, new Path("/c1"), new FsPermission(permission));
FileSystem.create(fs, new Path("/c1/c2.txt"),
new FsPermission(permission));
checkPermission(fs, "/c1", permission);
checkPermission(fs, "/c1/c2.txt", permission);
} finally {
try {
if(fs != null) fs.close();
} catch(Exception e) {
LOG.error(StringUtils.stringifyException(e));
}
try {
if(cluster != null) cluster.shutdown();
} catch(Exception e) {
LOG.error(StringUtils.stringifyException(e));
}
}
}
@Test
  public void testFilePermission() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
try {
FileSystem nnfs = FileSystem.get(conf);
// test permissions on files that do not exist
assertFalse(nnfs.exists(CHILD_FILE1));
try {
nnfs.setOwner(CHILD_FILE1, "foo", "bar");
assertTrue(false);
}
catch(java.io.FileNotFoundException e) {
LOG.info("GOOD: got " + e);
}
try {
nnfs.setPermission(CHILD_FILE1, new FsPermission((short)0777));
assertTrue(false);
}
catch(java.io.FileNotFoundException e) {
LOG.info("GOOD: got " + e);
}
// make sure nn can take user specified permission (with default fs
// permission umask applied)
FSDataOutputStream out = nnfs.create(CHILD_FILE1, new FsPermission(
(short) 0777), true, 1024, (short) 1, 1024, null);
FileStatus status = nnfs.getFileStatus(CHILD_FILE1);
// FS_PERMISSIONS_UMASK_DEFAULT is 0022
assertTrue(status.getPermission().toString().equals("rwxr-xr-x"));
nnfs.delete(CHILD_FILE1, false);
// following dir/file creations are legal
nnfs.mkdirs(CHILD_DIR1);
out = nnfs.create(CHILD_FILE1);
status = nnfs.getFileStatus(CHILD_FILE1);
assertTrue(status.getPermission().toString().equals("rw-r--r--"));
byte data[] = new byte[FILE_LEN];
RAN.nextBytes(data);
out.write(data);
out.close();
nnfs.setPermission(CHILD_FILE1, new FsPermission("700"));
status = nnfs.getFileStatus(CHILD_FILE1);
assertTrue(status.getPermission().toString().equals("rwx------"));
// following read is legal
byte dataIn[] = new byte[FILE_LEN];
FSDataInputStream fin = nnfs.open(CHILD_FILE1);
int bytesRead = fin.read(dataIn);
assertTrue(bytesRead == FILE_LEN);
for(int i=0; i<FILE_LEN; i++) {
assertEquals(data[i], dataIn[i]);
}
// test execution bit support for files
nnfs.setPermission(CHILD_FILE1, new FsPermission("755"));
status = nnfs.getFileStatus(CHILD_FILE1);
assertTrue(status.getPermission().toString().equals("rwxr-xr-x"));
nnfs.setPermission(CHILD_FILE1, new FsPermission("744"));
status = nnfs.getFileStatus(CHILD_FILE1);
assertTrue(status.getPermission().toString().equals("rwxr--r--"));
nnfs.setPermission(CHILD_FILE1, new FsPermission("700"));
////////////////////////////////////////////////////////////////
// test illegal file/dir creation
UserGroupInformation userGroupInfo =
UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES );
FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
      // make sure mkdir of an existing directory that is not owned by
// this user does not throw an exception.
userfs.mkdirs(CHILD_DIR1);
// illegal mkdir
assertTrue(!canMkdirs(userfs, CHILD_DIR2));
// illegal file creation
assertTrue(!canCreate(userfs, CHILD_FILE2));
// illegal file open
assertTrue(!canOpen(userfs, CHILD_FILE1));
nnfs.setPermission(ROOT_PATH, new FsPermission((short)0755));
nnfs.setPermission(CHILD_DIR1, new FsPermission("777"));
nnfs.setPermission(new Path("/"), new FsPermission((short)0777));
final Path RENAME_PATH = new Path("/foo/bar");
userfs.mkdirs(RENAME_PATH);
assertTrue(canRename(userfs, RENAME_PATH, CHILD_DIR1));
} finally {
cluster.shutdown();
}
}
static boolean canMkdirs(FileSystem fs, Path p) throws IOException {
try {
fs.mkdirs(p);
return true;
} catch(AccessControlException e) {
return false;
}
}
static boolean canCreate(FileSystem fs, Path p) throws IOException {
try {
fs.create(p);
return true;
} catch(AccessControlException e) {
return false;
}
}
static boolean canOpen(FileSystem fs, Path p) throws IOException {
try {
fs.open(p);
return true;
} catch(AccessControlException e) {
return false;
}
}
static boolean canRename(FileSystem fs, Path src, Path dst
) throws IOException {
try {
fs.rename(src, dst);
return true;
} catch(AccessControlException e) {
return false;
}
}
}
| 12,059 | 35.768293 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.io.FileNotFoundException;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestWrapper;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
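/**
 * Tests how permission and ACL checks apply to operations on symlinks
 * (delete, read, stat, rename) for a non-superuser, using both the
 * FileContext and FileSystem code paths.
 */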
public class TestPermissionSymlinks {
private static final Log LOG = LogFactory.getLog(TestPermissionSymlinks.class);
private static final Configuration conf = new HdfsConfiguration();
// Non-super user to run commands with
private static final UserGroupInformation user = UserGroupInformation
.createRemoteUser("myuser");
private static final Path linkParent = new Path("/symtest1");
private static final Path targetParent = new Path("/symtest2");
private static final Path link = new Path(linkParent, "link");
private static final Path target = new Path(targetParent, "target");
private static MiniDFSCluster cluster;
private static FileSystem fs;
private static FileSystemTestWrapper wrapper;
@BeforeClass
public static void beforeClassSetUp() throws Exception {
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
conf.set(FsPermission.UMASK_LABEL, "000");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
fs = cluster.getFileSystem();
wrapper = new FileSystemTestWrapper(fs);
}
@AfterClass
public static void afterClassTearDown() throws Exception {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
@Before
public void setUp() throws Exception {
// Create initial test files
fs.mkdirs(linkParent);
fs.mkdirs(targetParent);
DFSTestUtil.createFile(fs, target, 1024, (short)3, 0xBEEFl);
wrapper.createSymlink(target, link, false);
}
@After
public void tearDown() throws Exception {
// Wipe out everything
fs.delete(linkParent, true);
fs.delete(targetParent, true);
}
@Test(timeout = 5000)
public void testDelete() throws Exception {
fs.setPermission(linkParent, new FsPermission((short) 0555));
doDeleteLinkParentNotWritable();
fs.setPermission(linkParent, new FsPermission((short) 0777));
fs.setPermission(targetParent, new FsPermission((short) 0555));
fs.setPermission(target, new FsPermission((short) 0555));
doDeleteTargetParentAndTargetNotWritable();
}
@Test
public void testAclDelete() throws Exception {
fs.setAcl(linkParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
doDeleteLinkParentNotWritable();
fs.setAcl(linkParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
fs.setAcl(targetParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
fs.setAcl(target, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
doDeleteTargetParentAndTargetNotWritable();
}
private void doDeleteLinkParentNotWritable() throws Exception {
// Try to delete where the symlink's parent dir is not writable
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
FileContext myfc = FileContext.getFileContext(conf);
myfc.delete(link, false);
return null;
}
});
fail("Deleted symlink without write permissions on parent!");
} catch (AccessControlException e) {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
}
private void doDeleteTargetParentAndTargetNotWritable() throws Exception {
// Try a delete where the symlink parent dir is writable,
// but the target's parent and target are not
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
FileContext myfc = FileContext.getFileContext(conf);
myfc.delete(link, false);
return null;
}
});
// Make sure only the link was deleted
assertTrue("Target should not have been deleted!",
wrapper.exists(target));
assertFalse("Link should have been deleted!",
wrapper.exists(link));
}
@Test(timeout = 5000)
public void testReadWhenTargetNotReadable() throws Exception {
fs.setPermission(target, new FsPermission((short) 0000));
doReadTargetNotReadable();
}
@Test
public void testAclReadTargetNotReadable() throws Exception {
fs.setAcl(target, Arrays.asList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, user.getUserName(), NONE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, READ)));
doReadTargetNotReadable();
}
private void doReadTargetNotReadable() throws Exception {
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
FileContext myfc = FileContext.getFileContext(conf);
myfc.open(link).read();
return null;
}
});
fail("Read link target even though target does not have"
+ " read permissions!");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
}
@Test(timeout = 5000)
public void testFileStatus() throws Exception {
fs.setPermission(target, new FsPermission((short) 0000));
doGetFileLinkStatusTargetNotReadable();
}
@Test
public void testAclGetFileLinkStatusTargetNotReadable() throws Exception {
fs.setAcl(target, Arrays.asList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, user.getUserName(), NONE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, READ)));
doGetFileLinkStatusTargetNotReadable();
}
private void doGetFileLinkStatusTargetNotReadable() throws Exception {
// Try to getFileLinkStatus the link when the target is not readable
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
FileContext myfc = FileContext.getFileContext(conf);
FileStatus stat = myfc.getFileLinkStatus(link);
assertEquals("Expected link's FileStatus path to match link!",
link.makeQualified(fs.getUri(), fs.getWorkingDirectory()), stat.getPath());
Path linkTarget = myfc.getLinkTarget(link);
assertEquals("Expected link's target to match target!",
target, linkTarget);
return null;
}
});
}
@Test(timeout = 5000)
public void testRenameLinkTargetNotWritableFC() throws Exception {
fs.setPermission(target, new FsPermission((short) 0555));
fs.setPermission(targetParent, new FsPermission((short) 0555));
doRenameLinkTargetNotWritableFC();
}
@Test
public void testAclRenameTargetNotWritableFC() throws Exception {
fs.setAcl(target, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
fs.setAcl(targetParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
doRenameLinkTargetNotWritableFC();
}
private void doRenameLinkTargetNotWritableFC() throws Exception {
// Rename the link when the target and parent are not writable
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
// First FileContext
FileContext myfc = FileContext.getFileContext(conf);
Path newlink = new Path(linkParent, "newlink");
myfc.rename(link, newlink, Rename.NONE);
Path linkTarget = myfc.getLinkTarget(newlink);
assertEquals("Expected link's target to match target!",
target, linkTarget);
return null;
}
});
assertTrue("Expected target to exist", wrapper.exists(target));
}
@Test(timeout = 5000)
public void testRenameSrcNotWritableFC() throws Exception {
fs.setPermission(linkParent, new FsPermission((short) 0555));
doRenameSrcNotWritableFC();
}
@Test
public void testAclRenameSrcNotWritableFC() throws Exception {
fs.setAcl(linkParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
doRenameSrcNotWritableFC();
}
private void doRenameSrcNotWritableFC() throws Exception {
    // Rename the link when the link's parent directory is not writable
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
FileContext myfc = FileContext.getFileContext(conf);
Path newlink = new Path(targetParent, "newlink");
myfc.rename(link, newlink, Rename.NONE);
return null;
}
});
fail("Renamed link even though link's parent is not writable!");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
}
// Need separate FileSystem tests since the server-side impl is different
// See {@link ClientProtocol#rename} and {@link ClientProtocol#rename2}.
@Test(timeout = 5000)
public void testRenameLinkTargetNotWritableFS() throws Exception {
fs.setPermission(target, new FsPermission((short) 0555));
fs.setPermission(targetParent, new FsPermission((short) 0555));
doRenameLinkTargetNotWritableFS();
}
@Test
public void testAclRenameTargetNotWritableFS() throws Exception {
fs.setAcl(target, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
fs.setAcl(targetParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
doRenameLinkTargetNotWritableFS();
}
private void doRenameLinkTargetNotWritableFS() throws Exception {
// Rename the link when the target and parent are not writable
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
        // Use FileSystem rather than FileContext this time
FileSystem myfs = FileSystem.get(conf);
Path newlink = new Path(linkParent, "newlink");
myfs.rename(link, newlink);
Path linkTarget = myfs.getLinkTarget(newlink);
assertEquals("Expected link's target to match target!",
target, linkTarget);
return null;
}
});
assertTrue("Expected target to exist", wrapper.exists(target));
}
@Test(timeout = 5000)
public void testRenameSrcNotWritableFS() throws Exception {
fs.setPermission(linkParent, new FsPermission((short) 0555));
doRenameSrcNotWritableFS();
}
@Test
public void testAclRenameSrcNotWritableFS() throws Exception {
fs.setAcl(linkParent, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, user.getUserName(), READ_EXECUTE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, ALL)));
doRenameSrcNotWritableFS();
}
private void doRenameSrcNotWritableFS() throws Exception {
    // Rename the link when the link's parent directory is not writable
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
FileSystem myfs = FileSystem.get(conf);
Path newlink = new Path(targetParent, "newlink");
myfs.rename(link, newlink);
return null;
}
});
fail("Renamed link even though link's parent is not writable!");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
}
@Test
public void testAccess() throws Exception {
fs.setPermission(target, new FsPermission((short) 0002));
fs.setAcl(target, Arrays.asList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, USER, user.getShortUserName(), WRITE),
aclEntry(ACCESS, OTHER, WRITE)));
FileContext myfc = user.doAs(new PrivilegedExceptionAction<FileContext>() {
@Override
public FileContext run() throws IOException {
return FileContext.getFileContext(conf);
}
});
// Path to targetChild via symlink
myfc.access(link, FsAction.WRITE);
try {
myfc.access(link, FsAction.ALL);
fail("The access call should have failed.");
} catch (AccessControlException e) {
// expected
}
Path badPath = new Path(link, "bad");
try {
myfc.access(badPath, FsAction.READ);
fail("The access call should have failed");
} catch (FileNotFoundException e) {
// expected
}
}
}
| 15,471 | 34.814815 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestRefreshUserMappings {
private MiniDFSCluster cluster;
Configuration config;
private static final long groupRefreshTimeoutSec = 1;
private String tempResource = null;
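  /**
   * Group mapping stub whose lookups return a different pair of group names on
   * every call, so a successful cache refresh shows up as a change in the
   * groups reported for the same user.
   */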
public static class MockUnixGroupsMapping implements GroupMappingServiceProvider {
private int i=0;
@Override
public List<String> getGroups(String user) throws IOException {
System.out.println("Getting groups in MockUnixGroupsMapping");
String g1 = user + (10 * i + 1);
String g2 = user + (10 * i + 2);
List<String> l = new ArrayList<String>(2);
l.add(g1);
l.add(g2);
i++;
return l;
}
@Override
public void cacheGroupsRefresh() throws IOException {
System.out.println("Refreshing groups in MockUnixGroupsMapping");
}
@Override
public void cacheGroupsAdd(List<String> groups) throws IOException {
}
}
@Before
public void setUp() throws Exception {
config = new Configuration();
config.setClass("hadoop.security.group.mapping",
TestRefreshUserMappings.MockUnixGroupsMapping.class,
GroupMappingServiceProvider.class);
config.setLong("hadoop.security.groups.cache.secs", groupRefreshTimeoutSec);
Groups.getUserToGroupsMappingService(config);
FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
cluster = new MiniDFSCluster.Builder(config).build();
cluster.waitActive();
}
@After
public void tearDown() throws Exception {
if(cluster!=null) {
cluster.shutdown();
}
if(tempResource!=null) {
File f = new File(tempResource);
f.delete();
}
}
@Test
public void testGroupMappingRefresh() throws Exception {
DFSAdmin admin = new DFSAdmin(config);
String [] args = new String[]{"-refreshUserToGroupsMappings"};
Groups groups = Groups.getUserToGroupsMappingService(config);
String user = UserGroupInformation.getCurrentUser().getUserName();
System.out.println("first attempt:");
List<String> g1 = groups.getGroups(user);
String [] str_groups = new String [g1.size()];
g1.toArray(str_groups);
System.out.println(Arrays.toString(str_groups));
System.out.println("second attempt, should be same:");
List<String> g2 = groups.getGroups(user);
g2.toArray(str_groups);
System.out.println(Arrays.toString(str_groups));
for(int i=0; i<g2.size(); i++) {
assertEquals("Should be same group ", g1.get(i), g2.get(i));
}
admin.run(args);
System.out.println("third attempt(after refresh command), should be different:");
List<String> g3 = groups.getGroups(user);
g3.toArray(str_groups);
System.out.println(Arrays.toString(str_groups));
for(int i=0; i<g3.size(); i++) {
assertFalse("Should be different group: " + g1.get(i) + " and " + g3.get(i),
g1.get(i).equals(g3.get(i)));
}
// test time out
Thread.sleep(groupRefreshTimeoutSec*1100);
System.out.println("fourth attempt(after timeout), should be different:");
List<String> g4 = groups.getGroups(user);
g4.toArray(str_groups);
System.out.println(Arrays.toString(str_groups));
for(int i=0; i<g4.size(); i++) {
assertFalse("Should be different group ", g3.get(i).equals(g4.get(i)));
}
}
@Test
public void testRefreshSuperUserGroupsConfiguration() throws Exception {
final String SUPER_USER = "super_user";
final String [] GROUP_NAMES1 = new String [] {"gr1" , "gr2"};
final String [] GROUP_NAMES2 = new String [] {"gr3" , "gr4"};
//keys in conf
String userKeyGroups = DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(SUPER_USER);
String userKeyHosts = DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey (SUPER_USER);
config.set(userKeyGroups, "gr3,gr4,gr5"); // superuser can proxy for this group
config.set(userKeyHosts,"127.0.0.1");
ProxyUsers.refreshSuperUserGroupsConfiguration(config);
UserGroupInformation ugi1 = mock(UserGroupInformation.class);
UserGroupInformation ugi2 = mock(UserGroupInformation.class);
UserGroupInformation suUgi = mock(UserGroupInformation.class);
when(ugi1.getRealUser()).thenReturn(suUgi);
when(ugi2.getRealUser()).thenReturn(suUgi);
when(suUgi.getShortUserName()).thenReturn(SUPER_USER); // super user
when(suUgi.getUserName()).thenReturn(SUPER_USER+"L"); // super user
when(ugi1.getShortUserName()).thenReturn("user1");
when(ugi2.getShortUserName()).thenReturn("user2");
when(ugi1.getUserName()).thenReturn("userL1");
when(ugi2.getUserName()).thenReturn("userL2");
// set groups for users
when(ugi1.getGroupNames()).thenReturn(GROUP_NAMES1);
when(ugi2.getGroupNames()).thenReturn(GROUP_NAMES2);
// check before
try {
ProxyUsers.authorize(ugi1, "127.0.0.1");
fail("first auth for " + ugi1.getShortUserName() + " should've failed ");
} catch (AuthorizationException e) {
// expected
System.err.println("auth for " + ugi1.getUserName() + " failed");
}
try {
ProxyUsers.authorize(ugi2, "127.0.0.1");
System.err.println("auth for " + ugi2.getUserName() + " succeeded");
// expected
} catch (AuthorizationException e) {
fail("first auth for " + ugi2.getShortUserName() + " should've succeeded: " + e.getLocalizedMessage());
}
// refresh will look at configuration on the server side
// add additional resource with the new value
// so the server side will pick it up
String rsrc = "testGroupMappingRefresh_rsrc.xml";
addNewConfigResource(rsrc, userKeyGroups, "gr2", userKeyHosts, "127.0.0.1");
DFSAdmin admin = new DFSAdmin(config);
String [] args = new String[]{"-refreshSuperUserGroupsConfiguration"};
admin.run(args);
try {
ProxyUsers.authorize(ugi2, "127.0.0.1");
fail("second auth for " + ugi2.getShortUserName() + " should've failed ");
} catch (AuthorizationException e) {
// expected
System.err.println("auth for " + ugi2.getUserName() + " failed");
}
try {
ProxyUsers.authorize(ugi1, "127.0.0.1");
System.err.println("auth for " + ugi1.getUserName() + " succeeded");
// expected
} catch (AuthorizationException e) {
fail("second auth for " + ugi1.getShortUserName() + " should've succeeded: " + e.getLocalizedMessage());
}
}
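  /**
   * Writes a temporary XML resource next to hdfs-site.xml on the classpath and
   * registers it as a default resource so that the server side picks up the
   * new proxy-user settings on refresh.
   */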
private void addNewConfigResource(String rsrcName, String keyGroup,
String groups, String keyHosts, String hosts)
throws FileNotFoundException, UnsupportedEncodingException {
// location for temp resource should be in CLASSPATH
Configuration conf = new Configuration();
URL url = conf.getResource("hdfs-site.xml");
String urlPath = URLDecoder.decode(url.getPath().toString(), "UTF-8");
Path p = new Path(urlPath);
Path dir = p.getParent();
tempResource = dir.toString() + "/" + rsrcName;
String newResource =
"<configuration>"+
"<property><name>" + keyGroup + "</name><value>"+groups+"</value></property>" +
"<property><name>" + keyHosts + "</name><value>"+hosts+"</value></property>" +
"</configuration>";
PrintWriter writer = new PrintWriter(new FileOutputStream(tempResource));
writer.println(newResource);
writer.close();
Configuration.addDefaultResource(rsrcName);
}
}
| 9,297 | 35.750988 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.mortbay.util.ajax.JSON;
/**
* This class drives the creation of a mini-cluster on the local machine. By
* default, a MiniDFSCluster is spawned on the first available ports that are
* found.
*
* A series of command line flags controls the startup cluster options.
*
* This class can dump a Hadoop configuration and some basic metadata (in JSON)
* into a textfile.
*
* To shutdown the cluster, kill the process.
*
* To run this from the command line, do the following (replacing the jar
* version as appropriate):
*
* $HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/share/hadoop/hdfs/hadoop-hdfs-0.24.0-SNAPSHOT-tests.jar org.apache.hadoop.test.MiniDFSClusterManager -options...
*/
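// For example (jar version and paths are illustrative), a three-datanode cluster
// with a fixed NameNode port and a saved configuration could be started with:
//   $HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/share/hadoop/hdfs/hadoop-hdfs-<version>-tests.jar \
//       org.apache.hadoop.test.MiniDFSClusterManager \
//       -datanodes 3 -nnport 8020 -format -writeConfig /tmp/minidfs-conf.xml -D dfs.replication=1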
public class MiniDFSClusterManager {
private static final Log LOG =
LogFactory.getLog(MiniDFSClusterManager.class);
private MiniDFSCluster dfs;
private String writeDetails;
private int numDataNodes;
private int nameNodePort;
private int nameNodeHttpPort;
private StartupOption dfsOpts;
private String writeConfig;
private Configuration conf;
private boolean format;
private static final long SLEEP_INTERVAL_MS = 1000 * 60;
/**
* Creates configuration options object.
*/
@SuppressWarnings("static-access")
private Options makeOptions() {
Options options = new Options();
options
.addOption("datanodes", true, "How many datanodes to start (default 1)")
.addOption("format", false, "Format the DFS (default false)")
.addOption("cmdport", true,
"Which port to listen on for commands (default 0--we choose)")
.addOption("nnport", true, "NameNode port (default 0--we choose)")
.addOption("httpport", true, "NameNode http port (default 0--we choose)")
.addOption("namenode", true, "URL of the namenode (default "
+ "is either the DFS cluster or a temporary dir)")
.addOption(OptionBuilder
.hasArgs()
.withArgName("property=value")
.withDescription("Options to pass into configuration object")
.create("D"))
.addOption(OptionBuilder
.hasArg()
.withArgName("path")
.withDescription("Save configuration to this XML file.")
.create("writeConfig"))
.addOption(OptionBuilder
.hasArg()
.withArgName("path")
.withDescription("Write basic information to this JSON file.")
.create("writeDetails"))
.addOption(OptionBuilder.withDescription("Prints option help.")
.create("help"));
return options;
}
/**
* Main entry-point.
*/
public void run(String[] args) throws IOException {
if (!parseArguments(args)) {
return;
}
start();
sleepForever();
}
private void sleepForever() {
while (true) {
try {
Thread.sleep(SLEEP_INTERVAL_MS);
if (!dfs.isClusterUp()) {
LOG.info("Cluster is no longer up, exiting");
return;
}
      } catch (InterruptedException ignored) {
// nothing
}
}
}
/**
* Starts DFS as specified in member-variable options. Also writes out
* configuration and details, if requested.
*/
public void start() throws IOException, FileNotFoundException {
dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nameNodePort)
.nameNodeHttpPort(nameNodeHttpPort)
.numDataNodes(numDataNodes)
.startupOption(dfsOpts)
.format(format)
.build();
dfs.waitActive();
LOG.info("Started MiniDFSCluster -- namenode on port "
+ dfs.getNameNodePort());
if (writeConfig != null) {
FileOutputStream fos = new FileOutputStream(new File(writeConfig));
conf.writeXml(fos);
fos.close();
}
if (writeDetails != null) {
Map<String, Object> map = new TreeMap<String, Object>();
if (dfs != null) {
map.put("namenode_port", dfs.getNameNodePort());
}
FileWriter fw = new FileWriter(new File(writeDetails));
fw.write(new JSON().toJSON(map));
fw.close();
}
}
/**
* Parses arguments and fills out the member variables.
* @param args Command-line arguments.
* @return true on successful parse; false to indicate that the
* program should exit.
*/
private boolean parseArguments(String[] args) {
Options options = makeOptions();
CommandLine cli;
try {
CommandLineParser parser = new GnuParser();
cli = parser.parse(options, args);
} catch(ParseException e) {
LOG.warn("options parsing failed: "+e.getMessage());
new HelpFormatter().printHelp("...", options);
return false;
}
if (cli.hasOption("help")) {
new HelpFormatter().printHelp("...", options);
return false;
}
if (cli.getArgs().length > 0) {
for (String arg : cli.getArgs()) {
LOG.error("Unrecognized option: " + arg);
new HelpFormatter().printHelp("...", options);
return false;
}
}
// HDFS
numDataNodes = intArgument(cli, "datanodes", 1);
nameNodePort = intArgument(cli, "nnport", 0);
nameNodeHttpPort = intArgument(cli, "httpport", 0);
if (cli.hasOption("format")) {
dfsOpts = StartupOption.FORMAT;
format = true;
} else {
dfsOpts = StartupOption.REGULAR;
format = false;
}
// Runner
writeDetails = cli.getOptionValue("writeDetails");
writeConfig = cli.getOptionValue("writeConfig");
// General
conf = new HdfsConfiguration();
updateConfiguration(conf, cli.getOptionValues("D"));
return true;
}
/**
* Updates configuration based on what's given on the command line.
*
* @param conf2 The configuration object
* @param keyvalues An array of interleaved key value pairs.
*/
private void updateConfiguration(Configuration conf2, String[] keyvalues) {
int num_confs_updated = 0;
if (keyvalues != null) {
for (String prop : keyvalues) {
String[] keyval = prop.split("=", 2);
if (keyval.length == 2) {
conf2.set(keyval[0], keyval[1]);
num_confs_updated++;
} else {
LOG.warn("Ignoring -D option " + prop);
}
}
}
LOG.info("Updated " + num_confs_updated +
" configuration settings from command line.");
}
/**
* Extracts an integer argument with specified default value.
*/
private int intArgument(CommandLine cli, String argName, int defaultValue) {
String o = cli.getOptionValue(argName);
try {
if (o != null) {
return Integer.parseInt(o);
}
} catch (NumberFormatException ex) {
LOG.error("Couldn't parse value (" + o + ") for option "
+ argName + ". Using default: " + defaultValue);
}
return defaultValue;
}
/**
* Starts a MiniDFSClusterManager with parameters drawn from the command line.
*/
public static void main(String[] args) throws IOException {
new MiniDFSClusterManager().run(args);
}
}
| 8,856 | 31.682657 | 156 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/PathUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.io.File;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.fs.Path;
public class PathUtils {
public static Path getTestPath(Class<?> caller) {
return getTestPath(caller, true);
}
public static Path getTestPath(Class<?> caller, boolean create) {
return new Path(getTestDirName(caller));
}
public static File getTestDir(Class<?> caller) {
return getTestDir(caller, true);
}
public static File getTestDir(Class<?> caller, boolean create) {
File dir =
new File(System.getProperty("test.build.data", "target/test/data")
+ "/" + RandomStringUtils.randomAlphanumeric(10),
caller.getSimpleName());
if (create) {
dir.mkdirs();
}
return dir;
}
public static String getTestDirName(Class<?> caller) {
return getTestDirName(caller, true);
}
public static String getTestDirName(Class<?> caller, boolean create) {
return getTestDir(caller, create).getAbsolutePath();
}
}
| 1,848 | 30.338983 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/HdfsTestDriver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import org.apache.hadoop.hdfs.BenchmarkThroughput;
import org.apache.hadoop.util.ProgramDriver;
/**
* Driver for HDFS tests. The tests should NOT depend on map-reduce APIs.
*/
public class HdfsTestDriver {
private final ProgramDriver pgd;
public HdfsTestDriver() {
this(new ProgramDriver());
}
public HdfsTestDriver(ProgramDriver pgd) {
this.pgd = pgd;
try {
pgd.addClass("dfsthroughput", BenchmarkThroughput.class,
"measure hdfs throughput");
pgd.addClass("minidfscluster", MiniDFSClusterManager.class,
"Run a single-process mini DFS cluster");
} catch(Throwable e) {
e.printStackTrace();
}
}
public void run(String argv[]) {
int exitCode = -1;
try {
exitCode = pgd.run(argv);
} catch(Throwable e) {
e.printStackTrace();
}
System.exit(exitCode);
}
public static void main(String argv[]){
new HdfsTestDriver().run(argv);
}
}
| 1,791 | 27.903226 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.TimeoutException;
import org.apache.commons.lang.SystemUtils;
import org.apache.commons.lang.mutable.MutableBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.BlockReaderTestUtil;
import org.apache.hadoop.hdfs.ClientContext;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache.CacheVisitor;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
import org.apache.hadoop.io.ByteBufferPool;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;
/**
* This class tests if EnhancedByteBufferAccess works correctly.
*/
public class TestEnhancedByteBufferAccess {
private static final Log LOG =
LogFactory.getLog(TestEnhancedByteBufferAccess.class.getName());
static private TemporarySocketDirectory sockDir;
static private CacheManipulator prevCacheManipulator;
@BeforeClass
public static void init() {
sockDir = new TemporarySocketDirectory();
DomainSocket.disableBindPathValidation();
prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
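    // Replace mlock with a no-op so the caching tests do not require a real
    // locked-memory ulimit or elevated privileges.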
NativeIO.POSIX.setCacheManipulator(new CacheManipulator() {
@Override
public void mlock(String identifier,
ByteBuffer mmap, long length) throws IOException {
LOG.info("mlocking " + identifier);
}
});
}
@AfterClass
public static void teardown() {
// Restore the original CacheManipulator
NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
}
private static byte[] byteBufferToArray(ByteBuffer buf) {
byte resultArray[] = new byte[buf.remaining()];
buf.get(resultArray);
buf.flip();
return resultArray;
}
private static final int BLOCK_SIZE =
(int) NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();
public static HdfsConfiguration initZeroCopyTest() {
Assume.assumeTrue(NativeIO.isAvailable());
Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
HdfsConfiguration conf = new HdfsConfiguration();
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setInt(HdfsClientConfigKeys.Mmap.CACHE_SIZE_KEY, 3);
conf.setLong(HdfsClientConfigKeys.Mmap.CACHE_TIMEOUT_MS_KEY, 100);
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
new File(sockDir.getDir(),
"TestRequestMmapAccess._PORT.sock").getAbsolutePath());
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
true);
conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 1000);
conf.setLong(DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 1000);
return conf;
}
@Test
public void testZeroCopyReads() throws Exception {
HdfsConfiguration conf = initZeroCopyTest();
MiniDFSCluster cluster = null;
final Path TEST_PATH = new Path("/a");
FSDataInputStream fsIn = null;
final int TEST_FILE_LENGTH = 3 * BLOCK_SIZE;
FileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH,
TEST_FILE_LENGTH, (short)1, 7567L);
try {
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
} catch (InterruptedException e) {
Assert.fail("unexpected InterruptedException during " +
"waitReplication: " + e);
} catch (TimeoutException e) {
Assert.fail("unexpected TimeoutException during " +
"waitReplication: " + e);
}
fsIn = fs.open(TEST_PATH);
byte original[] = new byte[TEST_FILE_LENGTH];
IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
fsIn.close();
fsIn = fs.open(TEST_PATH);
ByteBuffer result = fsIn.read(null, BLOCK_SIZE,
EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(BLOCK_SIZE, result.remaining());
HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
Assert.assertEquals(BLOCK_SIZE,
dfsIn.getReadStatistics().getTotalBytesRead());
Assert.assertEquals(BLOCK_SIZE,
dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
byteBufferToArray(result));
fsIn.releaseBuffer(result);
} finally {
if (fsIn != null) fsIn.close();
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
}
@Test
public void testShortZeroCopyReads() throws Exception {
HdfsConfiguration conf = initZeroCopyTest();
MiniDFSCluster cluster = null;
final Path TEST_PATH = new Path("/a");
FSDataInputStream fsIn = null;
final int TEST_FILE_LENGTH = 3 * BLOCK_SIZE;
FileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LENGTH, (short)1, 7567L);
try {
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
} catch (InterruptedException e) {
Assert.fail("unexpected InterruptedException during " +
"waitReplication: " + e);
} catch (TimeoutException e) {
Assert.fail("unexpected TimeoutException during " +
"waitReplication: " + e);
}
fsIn = fs.open(TEST_PATH);
byte original[] = new byte[TEST_FILE_LENGTH];
IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
fsIn.close();
fsIn = fs.open(TEST_PATH);
// Try to read (2 * ${BLOCK_SIZE}), but only get ${BLOCK_SIZE} because of the block size.
HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
ByteBuffer result =
dfsIn.read(null, 2 * BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(BLOCK_SIZE, result.remaining());
Assert.assertEquals(BLOCK_SIZE,
dfsIn.getReadStatistics().getTotalBytesRead());
Assert.assertEquals(BLOCK_SIZE,
dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
byteBufferToArray(result));
dfsIn.releaseBuffer(result);
// Try to read (1 + ${BLOCK_SIZE}), but only get ${BLOCK_SIZE} because of the block size.
result =
dfsIn.read(null, 1 + BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(BLOCK_SIZE, result.remaining());
Assert.assertArrayEquals(Arrays.copyOfRange(original, BLOCK_SIZE, 2 * BLOCK_SIZE),
byteBufferToArray(result));
dfsIn.releaseBuffer(result);
} finally {
if (fsIn != null) fsIn.close();
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
}
@Test
public void testZeroCopyReadsNoFallback() throws Exception {
HdfsConfiguration conf = initZeroCopyTest();
MiniDFSCluster cluster = null;
final Path TEST_PATH = new Path("/a");
FSDataInputStream fsIn = null;
final int TEST_FILE_LENGTH = 3 * BLOCK_SIZE;
FileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH,
TEST_FILE_LENGTH, (short)1, 7567L);
try {
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
} catch (InterruptedException e) {
Assert.fail("unexpected InterruptedException during " +
"waitReplication: " + e);
} catch (TimeoutException e) {
Assert.fail("unexpected TimeoutException during " +
"waitReplication: " + e);
}
fsIn = fs.open(TEST_PATH);
byte original[] = new byte[TEST_FILE_LENGTH];
IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
fsIn.close();
fsIn = fs.open(TEST_PATH);
HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
ByteBuffer result;
try {
result = dfsIn.read(null, BLOCK_SIZE + 1, EnumSet.noneOf(ReadOption.class));
Assert.fail("expected UnsupportedOperationException");
} catch (UnsupportedOperationException e) {
// expected
}
result = dfsIn.read(null, BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(BLOCK_SIZE, result.remaining());
Assert.assertEquals(BLOCK_SIZE,
dfsIn.getReadStatistics().getTotalBytesRead());
Assert.assertEquals(BLOCK_SIZE,
dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
byteBufferToArray(result));
} finally {
if (fsIn != null) fsIn.close();
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
}
private static class CountingVisitor implements CacheVisitor {
private final int expectedNumOutstandingMmaps;
private final int expectedNumReplicas;
private final int expectedNumEvictable;
private final int expectedNumMmapedEvictable;
CountingVisitor(int expectedNumOutstandingMmaps,
int expectedNumReplicas, int expectedNumEvictable,
int expectedNumMmapedEvictable) {
this.expectedNumOutstandingMmaps = expectedNumOutstandingMmaps;
this.expectedNumReplicas = expectedNumReplicas;
this.expectedNumEvictable = expectedNumEvictable;
this.expectedNumMmapedEvictable = expectedNumMmapedEvictable;
}
@Override
public void visit(int numOutstandingMmaps,
Map<ExtendedBlockId, ShortCircuitReplica> replicas,
Map<ExtendedBlockId, InvalidToken> failedLoads,
Map<Long, ShortCircuitReplica> evictable,
Map<Long, ShortCircuitReplica> evictableMmapped) {
if (expectedNumOutstandingMmaps >= 0) {
Assert.assertEquals(expectedNumOutstandingMmaps, numOutstandingMmaps);
}
if (expectedNumReplicas >= 0) {
Assert.assertEquals(expectedNumReplicas, replicas.size());
}
if (expectedNumEvictable >= 0) {
Assert.assertEquals(expectedNumEvictable, evictable.size());
}
if (expectedNumMmapedEvictable >= 0) {
Assert.assertEquals(expectedNumMmapedEvictable, evictableMmapped.size());
}
}
}
@Test
public void testZeroCopyMmapCache() throws Exception {
HdfsConfiguration conf = initZeroCopyTest();
MiniDFSCluster cluster = null;
final Path TEST_PATH = new Path("/a");
final int TEST_FILE_LENGTH = 5 * BLOCK_SIZE;
final int RANDOM_SEED = 23453;
final String CONTEXT = "testZeroCopyMmapCacheContext";
FSDataInputStream fsIn = null;
ByteBuffer results[] = { null, null, null, null };
DistributedFileSystem fs = null;
conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH,
TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
try {
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
} catch (InterruptedException e) {
Assert.fail("unexpected InterruptedException during " +
"waitReplication: " + e);
} catch (TimeoutException e) {
Assert.fail("unexpected TimeoutException during " +
"waitReplication: " + e);
}
fsIn = fs.open(TEST_PATH);
byte original[] = new byte[TEST_FILE_LENGTH];
IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
fsIn.close();
fsIn = fs.open(TEST_PATH);
final ShortCircuitCache cache = ClientContext.get(
        CONTEXT, new DfsClientConf(conf)).getShortCircuitCache();
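    // The plain short-circuit read above cached all 5 block replicas; that stream
    // was closed, so every replica is evictable and no mmaps exist yet.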
cache.accept(new CountingVisitor(0, 5, 5, 0));
results[0] = fsIn.read(null, BLOCK_SIZE,
EnumSet.of(ReadOption.SKIP_CHECKSUMS));
fsIn.seek(0);
results[1] = fsIn.read(null, BLOCK_SIZE,
EnumSet.of(ReadOption.SKIP_CHECKSUMS));
// The mmap should be of the first block of the file.
final ExtendedBlock firstBlock =
DFSTestUtil.getFirstBlock(fs, TEST_PATH);
cache.accept(new CacheVisitor() {
@Override
public void visit(int numOutstandingMmaps,
Map<ExtendedBlockId, ShortCircuitReplica> replicas,
Map<ExtendedBlockId, InvalidToken> failedLoads,
Map<Long, ShortCircuitReplica> evictable,
Map<Long, ShortCircuitReplica> evictableMmapped) {
ShortCircuitReplica replica = replicas.get(
new ExtendedBlockId(firstBlock.getBlockId(), firstBlock.getBlockPoolId()));
Assert.assertNotNull(replica);
Assert.assertTrue(replica.hasMmap());
// The replica should not yet be evictable, since we have it open.
Assert.assertNull(replica.getEvictableTimeNs());
}
});
// Read more blocks.
results[2] = fsIn.read(null, BLOCK_SIZE,
EnumSet.of(ReadOption.SKIP_CHECKSUMS));
results[3] = fsIn.read(null, BLOCK_SIZE,
EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    // we should now have 3 outstanding mmaps; of the 5 cached replicas, the 2
    // untouched ones are evictable and none of the mmapped ones are
cache.accept(new CountingVisitor(3, 5, 2, 0));
// After we close the cursors, the mmaps should be evictable for
// a brief period of time. Then, they should be closed (we're
// using a very quick timeout)
for (ByteBuffer buffer : results) {
if (buffer != null) {
fsIn.releaseBuffer(buffer);
}
}
fsIn.close();
GenericTestUtils.waitFor(new Supplier<Boolean>() {
public Boolean get() {
final MutableBoolean finished = new MutableBoolean(false);
cache.accept(new CacheVisitor() {
@Override
public void visit(int numOutstandingMmaps,
Map<ExtendedBlockId, ShortCircuitReplica> replicas,
Map<ExtendedBlockId, InvalidToken> failedLoads,
Map<Long, ShortCircuitReplica> evictable,
Map<Long, ShortCircuitReplica> evictableMmapped) {
finished.setValue(evictableMmapped.isEmpty());
}
});
return finished.booleanValue();
}
}, 10, 60000);
cache.accept(new CountingVisitor(0, -1, -1, -1));
fs.close();
cluster.shutdown();
}
/**
* Test HDFS fallback reads. HDFS streams support the ByteBufferReadable
* interface.
*/
@Test
public void testHdfsFallbackReads() throws Exception {
HdfsConfiguration conf = initZeroCopyTest();
MiniDFSCluster cluster = null;
final Path TEST_PATH = new Path("/a");
final int TEST_FILE_LENGTH = 16385;
final int RANDOM_SEED = 23453;
FSDataInputStream fsIn = null;
DistributedFileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH,
TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
try {
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
} catch (InterruptedException e) {
Assert.fail("unexpected InterruptedException during " +
"waitReplication: " + e);
} catch (TimeoutException e) {
Assert.fail("unexpected TimeoutException during " +
"waitReplication: " + e);
}
fsIn = fs.open(TEST_PATH);
byte original[] = new byte[TEST_FILE_LENGTH];
IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
fsIn.close();
fsIn = fs.open(TEST_PATH);
testFallbackImpl(fsIn, original);
} finally {
if (fsIn != null) fsIn.close();
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
}
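  // A pool that only hands out the buffer type the test expects: fallbackRead
  // should request direct buffers exactly when the stream is ByteBufferReadable,
  // and heap buffers otherwise.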
private static class RestrictedAllocatingByteBufferPool
implements ByteBufferPool {
private final boolean direct;
RestrictedAllocatingByteBufferPool(boolean direct) {
this.direct = direct;
}
@Override
public ByteBuffer getBuffer(boolean direct, int length) {
Preconditions.checkArgument(this.direct == direct);
return direct ? ByteBuffer.allocateDirect(length) :
ByteBuffer.allocate(length);
}
@Override
public void putBuffer(ByteBuffer buffer) {
}
}
private static void testFallbackImpl(InputStream stream,
byte original[]) throws Exception {
RestrictedAllocatingByteBufferPool bufferPool =
new RestrictedAllocatingByteBufferPool(
stream instanceof ByteBufferReadable);
ByteBuffer result = ByteBufferUtil.fallbackRead(stream, bufferPool, 10);
Assert.assertEquals(10, result.remaining());
Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 10),
byteBufferToArray(result));
result = ByteBufferUtil.fallbackRead(stream, bufferPool, 5000);
Assert.assertEquals(5000, result.remaining());
Assert.assertArrayEquals(Arrays.copyOfRange(original, 10, 5010),
byteBufferToArray(result));
result = ByteBufferUtil.fallbackRead(stream, bufferPool, 9999999);
Assert.assertEquals(11375, result.remaining());
Assert.assertArrayEquals(Arrays.copyOfRange(original, 5010, 16385),
byteBufferToArray(result));
result = ByteBufferUtil.fallbackRead(stream, bufferPool, 10);
Assert.assertNull(result);
}
/**
* Test the {@link ByteBufferUtil#fallbackRead} function directly.
*/
@Test
public void testFallbackRead() throws Exception {
HdfsConfiguration conf = initZeroCopyTest();
MiniDFSCluster cluster = null;
final Path TEST_PATH = new Path("/a");
final int TEST_FILE_LENGTH = 16385;
final int RANDOM_SEED = 23453;
FSDataInputStream fsIn = null;
DistributedFileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH,
TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
try {
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
} catch (InterruptedException e) {
Assert.fail("unexpected InterruptedException during " +
"waitReplication: " + e);
} catch (TimeoutException e) {
Assert.fail("unexpected TimeoutException during " +
"waitReplication: " + e);
}
fsIn = fs.open(TEST_PATH);
byte original[] = new byte[TEST_FILE_LENGTH];
IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
fsIn.close();
fsIn = fs.open(TEST_PATH);
testFallbackImpl(fsIn, original);
} finally {
if (fsIn != null) fsIn.close();
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
}
/**
* Test fallback reads on a stream which does not support the
   * ByteBufferReadable interface.
*/
@Test
public void testIndirectFallbackReads() throws Exception {
final File TEST_DIR = new File(
System.getProperty("test.build.data","build/test/data"));
final String TEST_PATH = TEST_DIR + File.separator +
"indirectFallbackTestFile";
final int TEST_FILE_LENGTH = 16385;
final int RANDOM_SEED = 23453;
FileOutputStream fos = null;
FileInputStream fis = null;
try {
fos = new FileOutputStream(TEST_PATH);
Random random = new Random(RANDOM_SEED);
byte original[] = new byte[TEST_FILE_LENGTH];
random.nextBytes(original);
fos.write(original);
fos.close();
fos = null;
fis = new FileInputStream(TEST_PATH);
testFallbackImpl(fis, original);
} finally {
IOUtils.cleanup(LOG, fos, fis);
new File(TEST_PATH).delete();
}
}
/**
* Test that we can zero-copy read cached data even without disabling
* checksums.
*/
@Test(timeout=120000)
public void testZeroCopyReadOfCachedData() throws Exception {
BlockReaderTestUtil.enableShortCircuitShmTracing();
BlockReaderTestUtil.enableBlockReaderFactoryTracing();
BlockReaderTestUtil.enableHdfsCachingTracing();
final int TEST_FILE_LENGTH = BLOCK_SIZE;
final Path TEST_PATH = new Path("/a");
final int RANDOM_SEED = 23453;
HdfsConfiguration conf = initZeroCopyTest();
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
false);
final String CONTEXT = "testZeroCopyReadOfCachedData";
conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
DFSTestUtil.roundUpToMultiple(TEST_FILE_LENGTH,
(int) NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize()));
MiniDFSCluster cluster = null;
ByteBuffer result = null, result2 = null;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
DistributedFileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH,
TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
byte original[] = DFSTestUtil.
calculateFileContentsFromSeed(RANDOM_SEED, TEST_FILE_LENGTH);
// Prior to caching, the file can't be read via zero-copy
FSDataInputStream fsIn = fs.open(TEST_PATH);
try {
result = fsIn.read(null, TEST_FILE_LENGTH / 2,
EnumSet.noneOf(ReadOption.class));
Assert.fail("expected UnsupportedOperationException");
} catch (UnsupportedOperationException e) {
// expected
}
// Cache the file
fs.addCachePool(new CachePoolInfo("pool1"));
long directiveId = fs.addCacheDirective(new CacheDirectiveInfo.Builder().
setPath(TEST_PATH).
setReplication((short)1).
setPool("pool1").
build());
int numBlocks = (int)Math.ceil((double)TEST_FILE_LENGTH / BLOCK_SIZE);
DFSTestUtil.verifyExpectedCacheUsage(
DFSTestUtil.roundUpToMultiple(TEST_FILE_LENGTH, BLOCK_SIZE),
numBlocks, cluster.getDataNodes().get(0).getFSDataset());
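    // Once the DataNode reports the block as cached (and mlocked), zero-copy
    // reads are expected to work even without SKIP_CHECKSUMS.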
try {
result = fsIn.read(null, TEST_FILE_LENGTH,
EnumSet.noneOf(ReadOption.class));
} catch (UnsupportedOperationException e) {
Assert.fail("expected to be able to read cached file via zero-copy");
}
Assert.assertArrayEquals(Arrays.copyOfRange(original, 0,
BLOCK_SIZE), byteBufferToArray(result));
// Test that files opened after the cache operation has finished
// still get the benefits of zero-copy (regression test for HDFS-6086)
FSDataInputStream fsIn2 = fs.open(TEST_PATH);
try {
result2 = fsIn2.read(null, TEST_FILE_LENGTH,
EnumSet.noneOf(ReadOption.class));
} catch (UnsupportedOperationException e) {
Assert.fail("expected to be able to read cached file via zero-copy");
}
Assert.assertArrayEquals(Arrays.copyOfRange(original, 0,
BLOCK_SIZE), byteBufferToArray(result2));
fsIn2.releaseBuffer(result2);
fsIn2.close();
// check that the replica is anchored
final ExtendedBlock firstBlock =
DFSTestUtil.getFirstBlock(fs, TEST_PATH);
final ShortCircuitCache cache = ClientContext.get(
        CONTEXT, new DfsClientConf(conf)).getShortCircuitCache();
waitForReplicaAnchorStatus(cache, firstBlock, true, true, 1);
// Uncache the replica
fs.removeCacheDirective(directiveId);
waitForReplicaAnchorStatus(cache, firstBlock, false, true, 1);
fsIn.releaseBuffer(result);
waitForReplicaAnchorStatus(cache, firstBlock, false, false, 1);
DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
fsIn.close();
fs.close();
cluster.shutdown();
}
private void waitForReplicaAnchorStatus(final ShortCircuitCache cache,
final ExtendedBlock block, final boolean expectedIsAnchorable,
final boolean expectedIsAnchored, final int expectedOutstandingMmaps)
throws Exception {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
final MutableBoolean result = new MutableBoolean(false);
cache.accept(new CacheVisitor() {
@Override
public void visit(int numOutstandingMmaps,
Map<ExtendedBlockId, ShortCircuitReplica> replicas,
Map<ExtendedBlockId, InvalidToken> failedLoads,
Map<Long, ShortCircuitReplica> evictable,
Map<Long, ShortCircuitReplica> evictableMmapped) {
Assert.assertEquals(expectedOutstandingMmaps, numOutstandingMmaps);
ShortCircuitReplica replica =
replicas.get(ExtendedBlockId.fromExtendedBlock(block));
Assert.assertNotNull(replica);
Slot slot = replica.getSlot();
if ((expectedIsAnchorable != slot.isAnchorable()) ||
(expectedIsAnchored != slot.isAnchored())) {
LOG.info("replica " + replica + " has isAnchorable = " +
slot.isAnchorable() + ", isAnchored = " + slot.isAnchored() +
". Waiting for isAnchorable = " + expectedIsAnchorable +
", isAnchored = " + expectedIsAnchored);
return;
}
result.setValue(true);
}
});
return result.toBoolean();
}
}, 10, 60000);
}
@Test
public void testClientMmapDisable() throws Exception {
HdfsConfiguration conf = initZeroCopyTest();
conf.setBoolean(HdfsClientConfigKeys.Mmap.ENABLED_KEY, false);
MiniDFSCluster cluster = null;
final Path TEST_PATH = new Path("/a");
final int TEST_FILE_LENGTH = 16385;
final int RANDOM_SEED = 23453;
final String CONTEXT = "testClientMmapDisable";
FSDataInputStream fsIn = null;
DistributedFileSystem fs = null;
conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
try {
// With HdfsClientConfigKeys.Mmap.ENABLED_KEY set to false,
// we should not do memory mapped reads.
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH,
TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
fsIn = fs.open(TEST_PATH);
try {
fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.fail("expected zero-copy read to fail when client mmaps " +
"were disabled.");
} catch (UnsupportedOperationException e) {
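        // expected: client mmaps are disabled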
}
} finally {
if (fsIn != null) fsIn.close();
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
fsIn = null;
fs = null;
cluster = null;
try {
// Now try again with HdfsClientConfigKeys.Mmap.CACHE_SIZE_KEY == 0.
conf.setBoolean(HdfsClientConfigKeys.Mmap.ENABLED_KEY, true);
conf.setInt(HdfsClientConfigKeys.Mmap.CACHE_SIZE_KEY, 0);
conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT + ".1");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH,
TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
fsIn = fs.open(TEST_PATH);
ByteBuffer buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
fsIn.releaseBuffer(buf);
// Test EOF behavior
IOUtils.skipFully(fsIn, TEST_FILE_LENGTH - 1);
buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(null, buf);
} finally {
if (fsIn != null) fsIn.close();
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
}
@Test
public void test2GBMmapLimit() throws Exception {
Assume.assumeTrue(BlockReaderTestUtil.shouldTestLargeFiles());
HdfsConfiguration conf = initZeroCopyTest();
final long TEST_FILE_LENGTH = 2469605888L;
conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, TEST_FILE_LENGTH);
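    // ~2.3 GB with the block size set to the whole file length, so the single
    // block is larger than Integer.MAX_VALUE and cannot be mmapped in one piece.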
MiniDFSCluster cluster = null;
final Path TEST_PATH = new Path("/a");
final String CONTEXT = "test2GBMmapLimit";
conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
FSDataInputStream fsIn = null, fsIn2 = null;
ByteBuffer buf1 = null, buf2 = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LENGTH, (short)1, 0xB);
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
fsIn = fs.open(TEST_PATH);
buf1 = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(1, buf1.remaining());
fsIn.releaseBuffer(buf1);
buf1 = null;
fsIn.seek(2147483640L);
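      // 2147483640 + 7 == Integer.MAX_VALUE, so only 7 bytes remain before the
      // 2 GB boundary of the memory-mapped region.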
buf1 = fsIn.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(7, buf1.remaining());
Assert.assertEquals(Integer.MAX_VALUE, buf1.limit());
fsIn.releaseBuffer(buf1);
buf1 = null;
Assert.assertEquals(2147483647L, fsIn.getPos());
try {
buf1 = fsIn.read(null, 1024,
EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.fail("expected UnsupportedOperationException");
} catch (UnsupportedOperationException e) {
// expected; can't read past 2GB boundary.
}
fsIn.close();
fsIn = null;
// Now create another file with normal-sized blocks, and verify we
// can read past 2GB
final Path TEST_PATH2 = new Path("/b");
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 268435456L);
DFSTestUtil.createFile(fs, TEST_PATH2, 1024 * 1024, TEST_FILE_LENGTH,
268435456L, (short)1, 0xA);
fsIn2 = fs.open(TEST_PATH2);
fsIn2.seek(2147483640L);
buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(8, buf2.remaining());
Assert.assertEquals(2147483648L, fsIn2.getPos());
fsIn2.releaseBuffer(buf2);
buf2 = null;
buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(1024, buf2.remaining());
Assert.assertEquals(2147484672L, fsIn2.getPos());
fsIn2.releaseBuffer(buf2);
buf2 = null;
} finally {
if (buf1 != null) {
fsIn.releaseBuffer(buf1);
}
if (buf2 != null) {
fsIn2.releaseBuffer(buf2);
}
IOUtils.cleanup(null, fsIn, fsIn2);
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 33,458 | 38.225088 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
public class TestSymlinkHdfsDisable {
@Test(timeout=60000)
public void testSymlinkHdfsDisable() throws Exception {
Configuration conf = new HdfsConfiguration();
// disable symlink resolution
conf.setBoolean(
CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY, false);
// spin up minicluster, get dfs and filecontext
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
DistributedFileSystem dfs = cluster.getFileSystem();
FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
// Create test files/links
FileContextTestHelper helper = new FileContextTestHelper(
"/tmp/TestSymlinkHdfsDisable");
Path root = helper.getTestRootPath(fc);
Path target = new Path(root, "target");
Path link = new Path(root, "link");
DFSTestUtil.createFile(dfs, target, 4096, (short)1, 0xDEADDEAD);
fc.createSymlink(target, link, false);
// Try to resolve links with FileSystem and FileContext
try {
fc.open(link);
fail("Expected error when attempting to resolve link");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("resolution is disabled", e);
}
try {
dfs.open(link);
fail("Expected error when attempting to resolve link");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("resolution is disabled", e);
}
}
}
| 2,624 | 37.602941 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
/**
* Test of the URL stream handler factory.
*/
public class TestUrlStreamHandlerFactory {
private static final int RUNS = 20;
private static final int THREADS = 10;
private static final int TASKS = 200;
private static final int TIMEOUT = 30;
@Test
public void testConcurrency() throws Exception {
for (int i = 0; i < RUNS; i++) {
singleRun();
}
}
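  // Each run races TASKS createURLStreamHandler() calls across THREADS threads
  // to flush out concurrency problems in the factory's handler caching.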
private void singleRun() throws Exception {
final FsUrlStreamHandlerFactory factory = new FsUrlStreamHandlerFactory();
final Random random = new Random();
ExecutorService executor = Executors.newFixedThreadPool(THREADS);
ArrayList<Future<?>> futures = new ArrayList<Future<?>>(TASKS);
for (int i = 0; i < TASKS ; i++) {
final int aux = i;
futures.add(executor.submit(new Runnable() {
@Override
public void run() {
int rand = aux + random.nextInt(3);
factory.createURLStreamHandler(String.valueOf(rand));
}
}));
}
executor.shutdown();
try {
executor.awaitTermination(TIMEOUT, TimeUnit.SECONDS);
executor.shutdownNow();
} catch (InterruptedException e) {
// pass
}
// check for exceptions
    for (Future<?> future : futures) {
if (!future.isDone()) {
break; // timed out
}
future.get();
}
}
}
| 2,400 | 28.641975 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.apache.hadoop.fs.FileContextTestHelper.exists;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import javax.security.auth.login.LoginException;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestHDFSFileContextMainOperations extends
FileContextMainOperationsBaseTest {
private static MiniDFSCluster cluster;
private static Path defaultWorkingDirectory;
private static final HdfsConfiguration CONF = new HdfsConfiguration();
@Override
protected FileContextTestHelper createFileContextHelper() {
return new FileContextTestHelper("/tmp/TestHDFSFileContextMainOperations");
}
@BeforeClass
public static void clusterSetupAtBegining() throws IOException,
LoginException, URISyntaxException {
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
cluster.waitClusterUp();
URI uri0 = cluster.getURI(0);
fc = FileContext.getFileContext(uri0, CONF);
defaultWorkingDirectory = fc.makeQualified( new Path("/user/" +
UserGroupInformation.getCurrentUser().getShortUserName()));
fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
private static void restartCluster() throws IOException, LoginException {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(1)
.format(false).build();
cluster.waitClusterUp();
fc = FileContext.getFileContext(cluster.getURI(0), CONF);
defaultWorkingDirectory = fc.makeQualified( new Path("/user/" +
UserGroupInformation.getCurrentUser().getShortUserName()));
fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
@Override
@Before
public void setUp() throws Exception {
super.setUp();
}
@Override
@After
public void tearDown() throws Exception {
super.tearDown();
}
@Override
protected Path getDefaultWorkingDirectory() {
return defaultWorkingDirectory;
}
@Override
protected IOException unwrapException(IOException e) {
if (e instanceof RemoteException) {
return ((RemoteException) e).unwrapRemoteException();
}
return e;
}
private Path getTestRootPath(FileContext fc, String path) {
return fileContextTestHelper.getTestRootPath(fc, path);
}
@Test
public void testTruncate() throws Exception {
final short repl = 3;
final int blockSize = 1024;
final int numOfBlocks = 2;
DistributedFileSystem fs = cluster.getFileSystem();
Path dir = getTestRootPath(fc, "test/hadoop");
Path file = getTestRootPath(fc, "test/hadoop/file");
final byte[] data = FileSystemTestHelper.getFileData(
numOfBlocks, blockSize);
FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);
final int newLength = blockSize;
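    // Truncating exactly on a block boundary needs no block recovery, so
    // truncate() is expected to report completion immediately.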
boolean isReady = fc.truncate(file, newLength);
Assert.assertTrue("Recovery is not expected.", isReady);
FileStatus fileStatus = fc.getFileStatus(file);
Assert.assertEquals(fileStatus.getLen(), newLength);
AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
ContentSummary cs = fs.getContentSummary(dir);
Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
newLength * repl);
Assert.assertTrue(fs.delete(dir, true));
}
@Test
public void testOldRenameWithQuota() throws Exception {
DistributedFileSystem fs = cluster.getFileSystem();
Path src1 = getTestRootPath(fc, "test/testOldRenameWithQuota/srcdir/src1");
Path src2 = getTestRootPath(fc, "test/testOldRenameWithQuota/srcdir/src2");
Path dst1 = getTestRootPath(fc, "test/testOldRenameWithQuota/dstdir/dst1");
Path dst2 = getTestRootPath(fc, "test/testOldRenameWithQuota/dstdir/dst2");
createFile(src1);
createFile(src2);
fs.setQuota(src1.getParent(), HdfsConstants.QUOTA_DONT_SET,
HdfsConstants.QUOTA_DONT_SET);
fc.mkdir(dst1.getParent(), FileContext.DEFAULT_PERM, true);
fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
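    // A namespace quota of 2 counts dstdir itself plus one child, so only a
    // single rename into it can succeed.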
/*
* Test1: src does not exceed quota and dst has no quota check and hence
* accommodates rename
*/
oldRename(src1, dst1, true, false);
/*
* Test2: src does not exceed quota and dst has *no* quota to accommodate
* rename.
*/
// dstDir quota = 1 and dst1 already uses it
oldRename(src2, dst2, false, true);
/*
* Test3: src exceeds quota and dst has *no* quota to accommodate rename
*/
// src1 has no quota to accommodate new rename node
fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
oldRename(dst1, src1, false, true);
}
@Test
public void testRenameWithQuota() throws Exception {
DistributedFileSystem fs = cluster.getFileSystem();
Path src1 = getTestRootPath(fc, "test/testRenameWithQuota/srcdir/src1");
Path src2 = getTestRootPath(fc, "test/testRenameWithQuota/srcdir/src2");
Path dst1 = getTestRootPath(fc, "test/testRenameWithQuota/dstdir/dst1");
Path dst2 = getTestRootPath(fc, "test/testRenameWithQuota/dstdir/dst2");
createFile(src1);
createFile(src2);
fs.setQuota(src1.getParent(), HdfsConstants.QUOTA_DONT_SET,
HdfsConstants.QUOTA_DONT_SET);
fc.mkdir(dst1.getParent(), FileContext.DEFAULT_PERM, true);
fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
/*
* Test1: src does not exceed quota and dst has no quota check and hence
* accommodates rename
*/
// rename uses dstdir quota=1
rename(src1, dst1, false, true, false, Rename.NONE);
// rename reuses dstdir quota=1
rename(src2, dst1, true, true, false, Rename.OVERWRITE);
/*
* Test2: src does not exceed quota and dst has *no* quota to accommodate
* rename.
*/
// dstDir quota = 1 and dst1 already uses it
createFile(src2);
rename(src2, dst2, false, false, true, Rename.NONE);
/*
* Test3: src exceeds quota and dst has *no* quota to accommodate rename
* rename to a destination that does not exist
*/
// src1 has no quota to accommodate new rename node
fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
rename(dst1, src1, false, false, true, Rename.NONE);
/*
* Test4: src exceeds quota and dst has *no* quota to accommodate rename
* rename to a destination that exists and quota freed by deletion of dst
* is same as quota needed by src.
*/
// src1 has no quota to accommodate new rename node
fs.setQuota(src1.getParent(), 100, HdfsConstants.QUOTA_DONT_SET);
createFile(src1);
fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
rename(dst1, src1, true, true, false, Rename.OVERWRITE);
}
@Test
public void testRenameRoot() throws Exception {
Path src = getTestRootPath(fc, "test/testRenameRoot/srcdir/src1");
Path dst = new Path("/");
createFile(src);
rename(src, dst, true, false, true, Rename.OVERWRITE);
rename(dst, src, true, false, true, Rename.OVERWRITE);
}
/**
* Perform operations such as setting quota, deletion of files, rename and
* ensure system can apply edits log during startup.
*/
@Test
public void testEditsLogOldRename() throws Exception {
DistributedFileSystem fs = cluster.getFileSystem();
Path src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
Path dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
createFile(src1);
fs.mkdirs(dst1.getParent());
createFile(dst1);
// Set quota so that dst1 parent cannot allow under it new files/directories
fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
// Free up quota for a subsequent rename
fs.delete(dst1, true);
oldRename(src1, dst1, true, false);
// Restart the cluster and ensure the above operations can be
// loaded from the edits log
restartCluster();
fs = cluster.getFileSystem();
src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
Assert.assertFalse(fs.exists(src1)); // ensure src1 is already renamed
Assert.assertTrue(fs.exists(dst1)); // ensure rename dst exists
}
/**
* Perform operations such as setting quota, deletion of files, rename and
* ensure system can apply edits log during startup.
*/
@Test
public void testEditsLogRename() throws Exception {
DistributedFileSystem fs = cluster.getFileSystem();
Path src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
Path dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
createFile(src1);
fs.mkdirs(dst1.getParent());
createFile(dst1);
// Set quota so that dst1 parent cannot allow under it new files/directories
fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
// Free up quota for a subsequent rename
fs.delete(dst1, true);
rename(src1, dst1, true, true, false, Rename.OVERWRITE);
// Restart the cluster and ensure the above operations can be
// loaded from the edits log
restartCluster();
fs = cluster.getFileSystem();
src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
Assert.assertFalse(fs.exists(src1)); // ensure src1 is already renamed
Assert.assertTrue(fs.exists(dst1)); // ensure rename dst exists
}
@Test
public void testIsValidNameInvalidNames() {
String[] invalidNames = {
"/foo/../bar",
"/foo/./bar",
"/foo/:/bar",
"/foo:bar"
};
for (String invalidName: invalidNames) {
Assert.assertFalse(invalidName + " is not valid",
fc.getDefaultFileSystem().isValidName(invalidName));
}
}
private void oldRename(Path src, Path dst, boolean renameSucceeds,
boolean exception) throws Exception {
DistributedFileSystem fs = cluster.getFileSystem();
try {
Assert.assertEquals(renameSucceeds, fs.rename(src, dst));
} catch (Exception ex) {
Assert.assertTrue(exception);
}
Assert.assertEquals(renameSucceeds, !exists(fc, src));
Assert.assertEquals(renameSucceeds, exists(fc, dst));
}
private void rename(Path src, Path dst, boolean dstExists,
boolean renameSucceeds, boolean exception, Options.Rename... options)
throws Exception {
try {
fc.rename(src, dst, options);
Assert.assertTrue(renameSucceeds);
} catch (Exception ex) {
Assert.assertTrue(exception);
}
Assert.assertEquals(renameSucceeds, !exists(fc, src));
Assert.assertEquals((dstExists||renameSucceeds), exists(fc, dst));
}
@Override
protected boolean listCorruptedBlocksSupported() {
return true;
}
@Test
public void testCrossFileSystemRename() throws IOException {
try {
fc.rename(
new Path("hdfs://127.0.0.1/aaa/bbb/Foo"),
new Path("file://aaa/bbb/Moo"),
Options.Rename.OVERWRITE);
fail("IOexception expected.");
} catch (IOException ioe) {
// okay
}
}
}
| 12,762 | 34.452778 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.net.URISyntaxException;
import javax.security.auth.login.LoginException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
public class TestFcHdfsPermission extends FileContextPermissionBase {
private static final FileContextTestHelper fileContextTestHelper =
new FileContextTestHelper("/tmp/TestFcHdfsPermission");
private static FileContext fc;
private static MiniDFSCluster cluster;
private static Path defaultWorkingDirectory;
@Override
protected FileContextTestHelper getFileContextHelper() {
return fileContextTestHelper;
}
@Override
protected FileContext getFileContext() {
return fc;
}
@BeforeClass
public static void clusterSetupAtBegining()
throws IOException, LoginException, URISyntaxException {
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
fc = FileContext.getFileContext(cluster.getURI(0), conf);
defaultWorkingDirectory = fc.makeQualified( new Path("/user/" +
UserGroupInformation.getCurrentUser().getShortUserName()));
fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
cluster.shutdown();
}
@Override
@Before
public void setUp() throws Exception {
super.setUp();
}
@Override
@After
public void tearDown() throws Exception {
super.tearDown();
}
}
| 2,682 | 30.940476 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Test symbolic links in Hdfs.
*/
abstract public class TestSymlinkHdfs extends SymlinkBaseTest {
{
GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL);
}
protected static MiniDFSCluster cluster;
protected static WebHdfsFileSystem webhdfs;
protected static DistributedFileSystem dfs;
@Override
protected String getScheme() {
return "hdfs";
}
@Override
protected String testBaseDir1() throws IOException {
return "/test1";
}
@Override
protected String testBaseDir2() throws IOException {
return "/test2";
}
@Override
protected URI testURI() {
return cluster.getURI(0);
}
@Override
protected IOException unwrapException(IOException e) {
if (e instanceof RemoteException) {
return ((RemoteException)e).unwrapRemoteException();
}
return e;
}
@BeforeClass
public static void beforeClassSetup() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.set(FsPermission.UMASK_LABEL, "000");
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 0);
cluster = new MiniDFSCluster.Builder(conf).build();
webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
dfs = cluster.getFileSystem();
}
@AfterClass
public static void afterClassTeardown() throws Exception {
cluster.shutdown();
}
@Test(timeout=10000)
/** Access a file using a link that spans Hdfs to LocalFs */
public void testLinkAcrossFileSystems() throws IOException {
Path localDir = new Path("file://" + wrapper.getAbsoluteTestRootDir()
+ "/test");
Path localFile = new Path("file://" + wrapper.getAbsoluteTestRootDir()
+ "/test/file");
Path link = new Path(testBaseDir1(), "linkToFile");
FSTestWrapper localWrapper = wrapper.getLocalFSWrapper();
localWrapper.delete(localDir, true);
localWrapper.mkdir(localDir, FileContext.DEFAULT_PERM, true);
localWrapper.setWorkingDirectory(localDir);
assertEquals(localDir, localWrapper.getWorkingDirectory());
createAndWriteFile(localWrapper, localFile);
wrapper.createSymlink(localFile, link, false);
readFile(link);
assertEquals(fileSize, wrapper.getFileStatus(link).getLen());
}
@Test(timeout=10000)
/** Test renaming a file across two file systems using a link */
public void testRenameAcrossFileSystemsViaLink() throws IOException {
Path localDir = new Path("file://" + wrapper.getAbsoluteTestRootDir()
+ "/test");
Path hdfsFile = new Path(testBaseDir1(), "file");
Path link = new Path(testBaseDir1(), "link");
Path hdfsFileNew = new Path(testBaseDir1(), "fileNew");
Path hdfsFileNewViaLink = new Path(link, "fileNew");
FSTestWrapper localWrapper = wrapper.getLocalFSWrapper();
localWrapper.delete(localDir, true);
localWrapper.mkdir(localDir, FileContext.DEFAULT_PERM, true);
localWrapper.setWorkingDirectory(localDir);
createAndWriteFile(hdfsFile);
wrapper.createSymlink(localDir, link, false);
// Rename hdfs://test1/file to hdfs://test1/link/fileNew
// which renames to file://TEST_ROOT/test/fileNew which
// spans AbstractFileSystems and therefore fails.
try {
wrapper.rename(hdfsFile, hdfsFileNewViaLink);
fail("Renamed across file systems");
} catch (InvalidPathException ipe) {
// Expected from FileContext
} catch (IllegalArgumentException e) {
// Expected from Filesystem
GenericTestUtils.assertExceptionContains("Wrong FS: ", e);
}
// Now rename hdfs://test1/link/fileNew to hdfs://test1/fileNew
// which renames file://TEST_ROOT/test/fileNew to hdfs://test1/fileNew
// which spans AbstractFileSystems and therefore fails.
createAndWriteFile(hdfsFileNewViaLink);
try {
wrapper.rename(hdfsFileNewViaLink, hdfsFileNew);
fail("Renamed across file systems");
} catch (InvalidPathException ipe) {
// Expected from FileContext
} catch (IllegalArgumentException e) {
// Expected from Filesystem
GenericTestUtils.assertExceptionContains("Wrong FS: ", e);
}
}
@Test(timeout=10000)
/** Test create symlink to / */
public void testCreateLinkToSlash() throws IOException {
Path dir = new Path(testBaseDir1());
Path file = new Path(testBaseDir1(), "file");
Path link = new Path(testBaseDir1(), "linkToSlash");
Path fileViaLink = new Path(testBaseDir1()+"/linkToSlash"+
testBaseDir1()+"/file");
createAndWriteFile(file);
wrapper.setWorkingDirectory(dir);
wrapper.createSymlink(new Path("/"), link, false);
readFile(fileViaLink);
assertEquals(fileSize, wrapper.getFileStatus(fileViaLink).getLen());
// Ditto when using another file context since the file system
// for the slash is resolved according to the link's parent.
if (wrapper instanceof FileContextTestWrapper) {
FSTestWrapper localWrapper = wrapper.getLocalFSWrapper();
Path linkQual = new Path(cluster.getURI(0).toString(), fileViaLink);
assertEquals(fileSize, localWrapper.getFileStatus(linkQual).getLen());
}
}
@Test(timeout=10000)
/** setPermission affects the target not the link */
public void testSetPermissionAffectsTarget() throws IOException {
Path file = new Path(testBaseDir1(), "file");
Path dir = new Path(testBaseDir2());
Path linkToFile = new Path(testBaseDir1(), "linkToFile");
Path linkToDir = new Path(testBaseDir1(), "linkToDir");
createAndWriteFile(file);
wrapper.createSymlink(file, linkToFile, false);
wrapper.createSymlink(dir, linkToDir, false);
    // Changing the permissions using the link does not modify
    // the permissions of the link itself.
FsPermission perms = wrapper.getFileLinkStatus(linkToFile).getPermission();
wrapper.setPermission(linkToFile, new FsPermission((short)0664));
wrapper.setOwner(linkToFile, "user", "group");
assertEquals(perms, wrapper.getFileLinkStatus(linkToFile).getPermission());
// but the file's permissions were adjusted appropriately
FileStatus stat = wrapper.getFileStatus(file);
assertEquals(0664, stat.getPermission().toShort());
assertEquals("user", stat.getOwner());
assertEquals("group", stat.getGroup());
// Getting the file's permissions via the link is the same
// as getting the permissions directly.
assertEquals(stat.getPermission(),
wrapper.getFileStatus(linkToFile).getPermission());
// Ditto for a link to a directory
perms = wrapper.getFileLinkStatus(linkToDir).getPermission();
wrapper.setPermission(linkToDir, new FsPermission((short)0664));
wrapper.setOwner(linkToDir, "user", "group");
assertEquals(perms, wrapper.getFileLinkStatus(linkToDir).getPermission());
stat = wrapper.getFileStatus(dir);
assertEquals(0664, stat.getPermission().toShort());
assertEquals("user", stat.getOwner());
assertEquals("group", stat.getGroup());
assertEquals(stat.getPermission(),
wrapper.getFileStatus(linkToDir).getPermission());
}
@Test(timeout=10000)
/** Create a symlink using a path with scheme but no authority */
public void testCreateWithPartQualPathFails() throws IOException {
Path fileWoAuth = new Path("hdfs:///test/file");
Path linkWoAuth = new Path("hdfs:///test/link");
try {
createAndWriteFile(fileWoAuth);
fail("HDFS requires URIs with schemes have an authority");
} catch (RuntimeException e) {
// Expected
}
try {
wrapper.createSymlink(new Path("foo"), linkWoAuth, false);
fail("HDFS requires URIs with schemes have an authority");
} catch (RuntimeException e) {
// Expected
}
}
@Test(timeout=10000)
/** setReplication affects the target not the link */
public void testSetReplication() throws IOException {
Path file = new Path(testBaseDir1(), "file");
Path link = new Path(testBaseDir1(), "linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file, link, false);
wrapper.setReplication(link, (short)2);
assertEquals(0, wrapper.getFileLinkStatus(link).getReplication());
assertEquals(2, wrapper.getFileStatus(link).getReplication());
assertEquals(2, wrapper.getFileStatus(file).getReplication());
}
@Test(timeout=10000)
/** Test create symlink with a max len name */
public void testCreateLinkMaxPathLink() throws IOException {
Path dir = new Path(testBaseDir1());
Path file = new Path(testBaseDir1(), "file");
final int maxPathLen = HdfsServerConstants.MAX_PATH_LENGTH;
final int dirLen = dir.toString().length() + 1;
int len = maxPathLen - dirLen;
// Build a MAX_PATH_LENGTH path
StringBuilder sb = new StringBuilder("");
for (int i = 0; i < (len / 10); i++) {
sb.append("0123456789");
}
for (int i = 0; i < (len % 10); i++) {
sb.append("x");
}
Path link = new Path(sb.toString());
assertEquals(maxPathLen, dirLen + link.toString().length());
// Check that it works
createAndWriteFile(file);
wrapper.setWorkingDirectory(dir);
wrapper.createSymlink(file, link, false);
readFile(link);
// Now modify the path so it's too large
link = new Path(sb.toString()+"x");
try {
wrapper.createSymlink(file, link, false);
fail("Path name should be too long");
} catch (IOException x) {
// Expected
}
}
@Test(timeout=10000)
/** Test symlink owner */
public void testLinkOwner() throws IOException {
Path file = new Path(testBaseDir1(), "file");
Path link = new Path(testBaseDir1(), "symlinkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file, link, false);
FileStatus statFile = wrapper.getFileStatus(file);
FileStatus statLink = wrapper.getFileStatus(link);
assertEquals(statLink.getOwner(), statFile.getOwner());
}
@Test(timeout=10000)
/** Test WebHdfsFileSystem.createSymlink(..). */
public void testWebHDFS() throws IOException {
Path file = new Path(testBaseDir1(), "file");
Path link = new Path(testBaseDir1(), "linkToFile");
createAndWriteFile(file);
webhdfs.createSymlink(file, link, false);
wrapper.setReplication(link, (short)2);
assertEquals(0, wrapper.getFileLinkStatus(link).getReplication());
assertEquals(2, wrapper.getFileStatus(link).getReplication());
assertEquals(2, wrapper.getFileStatus(file).getReplication());
}
@Test(timeout=10000)
  /** Test createSymlink(..) with quota. */
public void testQuota() throws IOException {
final Path dir = new Path(testBaseDir1());
dfs.setQuota(dir, 3, HdfsConstants.QUOTA_DONT_SET);
final Path file = new Path(dir, "file");
createAndWriteFile(file);
//creating the first link should succeed
final Path link1 = new Path(dir, "link1");
wrapper.createSymlink(file, link1, false);
try {
//creating the second link should fail with QuotaExceededException.
final Path link2 = new Path(dir, "link2");
wrapper.createSymlink(file, link2, false);
fail("Created symlink despite quota violation");
} catch(QuotaExceededException qee) {
//expected
}
}
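  // Illustrative sketch, not exercised by the original tests: reading back the
  // stored target of a symlink. Assumes "link" was created with
  // createSymlink() as in the tests above.
  protected Path getLinkTargetExample(Path link) throws IOException {
    // getFileLinkStatus() does not dereference the final path component, so
    // getSymlink() on the returned status yields the stored target path.
    return wrapper.getFileLinkStatus(link).getSymlink();
  }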
}
| 13,252 | 37.526163 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.NativeCodeLoader;
public class TestHdfsNativeCodeLoader {
static final Log LOG = LogFactory.getLog(TestHdfsNativeCodeLoader.class);
private static boolean requireTestJni() {
String rtj = System.getProperty("require.test.libhadoop");
if (rtj == null) return false;
if (rtj.compareToIgnoreCase("false") == 0) return false;
return true;
}
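  // Illustrative sketch, not part of the original test: the same property
  // contract as requireTestJni() above, written with an explicit default so
  // the behaviour is easier to see at a glance.
  private static boolean requireTestJniExample() {
    // An absent property or the value "false" (case-insensitive) means the
    // native library is not required; any other value means it is.
    return !"false".equalsIgnoreCase(
        System.getProperty("require.test.libhadoop", "false"));
  }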
@Test
@Ignore
public void testNativeCodeLoaded() {
    if (!requireTestJni()) {
LOG.info("TestNativeCodeLoader: libhadoop.so testing is not required.");
return;
}
if (!NativeCodeLoader.isNativeCodeLoaded()) {
String LD_LIBRARY_PATH = System.getenv().get("LD_LIBRARY_PATH");
if (LD_LIBRARY_PATH == null) LD_LIBRARY_PATH = "";
fail("TestNativeCodeLoader: libhadoop.so testing was required, but " +
"libhadoop.so was not loaded. LD_LIBRARY_PATH = " + LD_LIBRARY_PATH);
}
LOG.info("TestHdfsNativeCodeLoader: libhadoop.so is loaded.");
}
}
| 2,005 | 36.148148 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestSymlinkHdfsFileContext extends TestSymlinkHdfs {
private static FileContext fc;
@BeforeClass
public static void testSetup() throws Exception {
fc = FileContext.getFileContext(cluster.getURI(0));
wrapper = new FileContextTestWrapper(fc, "/tmp/TestSymlinkHdfsFileContext");
}
@Test(timeout=1000)
/** Test access a symlink using AbstractFileSystem */
public void testAccessLinkFromAbstractFileSystem() throws IOException {
Path file = new Path(testBaseDir1(), "file");
Path link = new Path(testBaseDir1(), "linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file, link, false);
try {
AbstractFileSystem afs = fc.getDefaultFileSystem();
afs.open(link);
fail("Opened a link using AFS");
} catch (UnresolvedLinkException x) {
// Expected
}
}
}
| 1,804 | 33.056604 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
public class TestSymlinkHdfsFileSystem extends TestSymlinkHdfs {
@BeforeClass
public static void testSetup() throws Exception {
wrapper = new FileSystemTestWrapper(dfs, "/tmp/TestSymlinkHdfsFileSystem");
}
@Override
@Ignore("FileSystem adds missing authority in absolute URIs")
@Test(timeout=1000)
public void testCreateWithPartQualPathFails() throws IOException {}
@Ignore("FileSystem#create creates parent directories," +
" so dangling links to directories are created")
@Override
@Test(timeout=1000)
public void testCreateFileViaDanglingLinkParent() throws IOException {}
// Additional tests for DFS-only methods
@Test(timeout=10000)
public void testRecoverLease() throws IOException {
Path dir = new Path(testBaseDir1());
Path file = new Path(testBaseDir1(), "file");
Path link = new Path(testBaseDir1(), "link");
wrapper.setWorkingDirectory(dir);
createAndWriteFile(file);
wrapper.createSymlink(file, link, false);
// Attempt recoverLease through a symlink
boolean closed = dfs.recoverLease(link);
assertTrue("Expected recoverLease to return true", closed);
}
@Test(timeout=10000)
public void testIsFileClosed() throws IOException {
Path dir = new Path(testBaseDir1());
Path file = new Path(testBaseDir1(), "file");
Path link = new Path(testBaseDir1(), "link");
wrapper.setWorkingDirectory(dir);
createAndWriteFile(file);
wrapper.createSymlink(file, link, false);
    // Attempt isFileClosed through a symlink
boolean closed = dfs.isFileClosed(link);
assertTrue("Expected isFileClosed to return true", closed);
}
@Test(timeout=10000)
public void testConcat() throws Exception {
Path dir = new Path(testBaseDir1());
Path link = new Path(testBaseDir1(), "link");
Path dir2 = new Path(testBaseDir2());
wrapper.createSymlink(dir2, link, false);
wrapper.setWorkingDirectory(dir);
// Concat with a target and srcs through a link
Path target = new Path(link, "target");
createAndWriteFile(target);
Path[] srcs = new Path[3];
for (int i=0; i<srcs.length; i++) {
srcs[i] = new Path(link, "src-" + i);
createAndWriteFile(srcs[i]);
}
dfs.concat(target, srcs);
}
@Test(timeout=10000)
public void testSnapshot() throws Exception {
Path dir = new Path(testBaseDir1());
Path link = new Path(testBaseDir1(), "link");
Path dir2 = new Path(testBaseDir2());
wrapper.createSymlink(dir2, link, false);
wrapper.setWorkingDirectory(dir);
dfs.allowSnapshot(link);
dfs.disallowSnapshot(link);
dfs.allowSnapshot(link);
dfs.createSnapshot(link, "mcmillan");
dfs.renameSnapshot(link, "mcmillan", "seaborg");
dfs.deleteSnapshot(link, "seaborg");
}
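  // Illustrative sketch, not part of the original test: once a snapshot
  // exists, its contents are reachable under "<dir>/.snapshot/<name>". The
  // snapshot name "mcmillan" and the file name "file" are assumed here purely
  // for illustration.
  private Path snapshotPathExample(Path snapshottableDir) {
    return new Path(snapshottableDir, ".snapshot/mcmillan/file");
  }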
}
| 3,803 | 34.222222 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.PeerCache;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.io.IOUtils;
import org.junit.Assert;
import org.junit.Test;
public class TestUnbuffer {
private static final Log LOG =
LogFactory.getLog(TestUnbuffer.class.getName());
/**
* Test that calling Unbuffer closes sockets.
*/
@Test
public void testUnbufferClosesSockets() throws Exception {
Configuration conf = new Configuration();
// Set a new ClientContext. This way, we will have our own PeerCache,
// rather than sharing one with other unit tests.
conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,
"testUnbufferClosesSocketsContext");
// Disable short-circuit reads. With short-circuit, we wouldn't hold open a
// TCP socket.
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
// Set a really long socket timeout to avoid test timing issues.
conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
100000000L);
conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
100000000L);
MiniDFSCluster cluster = null;
FSDataInputStream stream = null;
try {
cluster = new MiniDFSCluster.Builder(conf).build();
DistributedFileSystem dfs = (DistributedFileSystem)
FileSystem.newInstance(conf);
final Path TEST_PATH = new Path("/test1");
DFSTestUtil.createFile(dfs, TEST_PATH, 128, (short)1, 1);
stream = dfs.open(TEST_PATH);
// Read a byte. This will trigger the creation of a block reader.
stream.seek(2);
int b = stream.read();
Assert.assertTrue(-1 != b);
// The Peer cache should start off empty.
PeerCache cache = dfs.getClient().getClientContext().getPeerCache();
Assert.assertEquals(0, cache.size());
// Unbuffer should clear the block reader and return the socket to the
// cache.
stream.unbuffer();
stream.seek(2);
Assert.assertEquals(1, cache.size());
int b2 = stream.read();
Assert.assertEquals(b, b2);
} finally {
if (stream != null) {
IOUtils.cleanup(null, stream);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
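  /**
   * Minimal sketch, not part of the original tests: the read-then-unbuffer
   * pattern exercised above. Assumes "fs" and "path" refer to an existing
   * HDFS file.
   */
  private static void readAndUnbufferExample(FileSystem fs, Path path)
      throws java.io.IOException {
    try (FSDataInputStream in = fs.open(path)) {
      in.read();     // creates a block reader (and possibly a TCP socket)
      in.unbuffer(); // drops the reader; a cached socket may return to the PeerCache
    }
  }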
/**
* Test opening many files via TCP (not short-circuit).
*
* This is practical when using unbuffer, because it reduces the number of
* sockets and amount of memory that we use.
*/
@Test
public void testOpenManyFilesViaTcp() throws Exception {
final int NUM_OPENS = 500;
Configuration conf = new Configuration();
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
MiniDFSCluster cluster = null;
FSDataInputStream[] streams = new FSDataInputStream[NUM_OPENS];
try {
cluster = new MiniDFSCluster.Builder(conf).build();
DistributedFileSystem dfs = cluster.getFileSystem();
final Path TEST_PATH = new Path("/testFile");
DFSTestUtil.createFile(dfs, TEST_PATH, 131072, (short)1, 1);
for (int i = 0; i < NUM_OPENS; i++) {
streams[i] = dfs.open(TEST_PATH);
LOG.info("opening file " + i + "...");
Assert.assertTrue(-1 != streams[i].read());
streams[i].unbuffer();
}
} finally {
for (FSDataInputStream stream : streams) {
IOUtils.cleanup(null, stream);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 4,581 | 34.796875 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import javax.security.auth.login.LoginException;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import static org.apache.hadoop.fs.FileContextTestHelper.getDefaultBlockSize;
import static org.apache.hadoop.fs.FileContextTestHelper.getFileData;
/**
 * Test of FileContext APIs on SWebhdfs.
*/
public class TestSWebHdfsFileContextMainOperations
extends TestWebHdfsFileContextMainOperations {
private static MiniDFSCluster cluster;
private static Path defaultWorkingDirectory;
private static String keystoresDir;
private static String sslConfDir;
protected static URI webhdfsUrl;
private static final HdfsConfiguration CONF = new HdfsConfiguration();
private static final String BASEDIR =
System.getProperty("test.build.dir", "target/test-dir") + "/"
+ TestSWebHdfsFileContextMainOperations.class.getSimpleName();
protected static int numBlocks = 2;
protected static final byte[] data = getFileData(numBlocks,
getDefaultBlockSize());
private static Configuration sslConf;
@BeforeClass
public static void clusterSetupAtBeginning()
throws IOException, LoginException, URISyntaxException {
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConf = new Configuration();
try {
sslConfDir = KeyStoreTestUtil
.getClasspathDir(TestSWebHdfsFileContextMainOperations.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, sslConf, false);
} catch (Exception ex) {
throw new RuntimeException(ex);
}
CONF.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "HTTPS_ONLY");
CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
CONF.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
CONF.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "DEFAULT_AND_LOCALHOST");
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
cluster.waitClusterUp();
webhdfsUrl = new URI(SWebHdfs.SCHEME + "://" + cluster.getConfiguration(0)
.get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY));
fc = FileContext.getFileContext(webhdfsUrl, CONF);
defaultWorkingDirectory = fc.makeQualified(new Path(
"/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
@Override
public URI getWebhdfsUrl() {
return webhdfsUrl;
}
@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
FileUtil.fullyDelete(new File(BASEDIR));
KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
}
}
| 4,043 | 35.432432 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestVolumeId.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestVolumeId {
@Test
public void testEquality() {
final VolumeId id1 = new HdfsVolumeId(new byte[] { (byte)0, (byte)0 });
testEq(true, id1, id1);
final VolumeId id2 = new HdfsVolumeId(new byte[] { (byte)0, (byte)1 });
testEq(true, id2, id2);
testEq(false, id1, id2);
final VolumeId id3 = new HdfsVolumeId(new byte[] { (byte)1, (byte)0 });
testEq(true, id3, id3);
testEq(false, id1, id3);
    // same bytes as id2:
final VolumeId id2copy1 = new HdfsVolumeId(new byte[] { (byte)0, (byte)1 });
testEq(true, id2, id2copy1);
// same as 2copy1:
final VolumeId id2copy2 = new HdfsVolumeId(new byte[] { (byte)0, (byte)1 });
testEq(true, id2, id2copy2);
testEqMany(true, new VolumeId[] { id2, id2copy1, id2copy2 });
testEqMany(false, new VolumeId[] { id1, id2, id3 });
}
@SuppressWarnings("unchecked")
private <T> void testEq(final boolean eq, Comparable<T> id1, Comparable<T> id2) {
final int h1 = id1.hashCode();
final int h2 = id2.hashCode();
// eq reflectivity:
assertTrue(id1.equals(id1));
assertTrue(id2.equals(id2));
assertEquals(0, id1.compareTo((T)id1));
assertEquals(0, id2.compareTo((T)id2));
// eq symmetry:
assertEquals(eq, id1.equals(id2));
assertEquals(eq, id2.equals(id1));
// null comparison:
assertFalse(id1.equals(null));
assertFalse(id2.equals(null));
// compareTo:
assertEquals(eq, 0 == id1.compareTo((T)id2));
assertEquals(eq, 0 == id2.compareTo((T)id1));
// compareTo must be antisymmetric:
assertEquals(sign(id1.compareTo((T)id2)), -sign(id2.compareTo((T)id1)));
// compare with null should never return 0 to be consistent with #equals():
assertTrue(id1.compareTo(null) != 0);
assertTrue(id2.compareTo(null) != 0);
// check that hash codes did not change:
assertEquals(h1, id1.hashCode());
assertEquals(h2, id2.hashCode());
if (eq) {
// in this case the hash codes must be the same:
assertEquals(h1, h2);
}
}
private static int sign(int x) {
if (x == 0) {
return 0;
} else if (x > 0) {
return 1;
} else {
return -1;
}
}
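  // Editorial note, not in the original: sign() above behaves the same as
  // java.lang.Integer.signum(int); it is written out locally for clarity.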
@SuppressWarnings("unchecked")
private <T> void testEqMany(final boolean eq, Comparable<T>... volumeIds) {
Comparable<T> vidNext;
int sum = 0;
for (int i=0; i<volumeIds.length; i++) {
if (i == volumeIds.length - 1) {
vidNext = volumeIds[0];
} else {
vidNext = volumeIds[i + 1];
}
testEq(eq, volumeIds[i], vidNext);
sum += sign(volumeIds[i].compareTo((T)vidNext));
}
// the comparison relationship must always be acyclic:
assertTrue(sum < volumeIds.length);
}
  /*
   * Test HdfsVolumeId(new byte[0]) instances: show that we permit such
   * objects, that they are still valid, and that they obey the same
   * equality rules as other objects.
   */
@Test
public void testIdEmptyBytes() {
final VolumeId idEmpty1 = new HdfsVolumeId(new byte[0]);
final VolumeId idEmpty2 = new HdfsVolumeId(new byte[0]);
final VolumeId idNotEmpty = new HdfsVolumeId(new byte[] { (byte)1 });
testEq(true, idEmpty1, idEmpty2);
testEq(false, idEmpty1, idNotEmpty);
testEq(false, idEmpty2, idNotEmpty);
}
/*
* test #toString() for typical VolumeId equality classes
*/
@Test
public void testToString() {
String strEmpty = new HdfsVolumeId(new byte[] {}).toString();
assertNotNull(strEmpty);
String strNotEmpty = new HdfsVolumeId(new byte[] { (byte)1 }).toString();
assertNotNull(strNotEmpty);
}
}
| 4,558 | 30.013605 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.*;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.UUID;
import java.util.regex.Pattern;
import com.google.common.collect.Ordering;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.*;
public class TestGlobPaths {
private static final UserGroupInformation unprivilegedUser =
UserGroupInformation.createUserForTesting("myuser",
new String[] { "mygroup" });
static class RegexPathFilter implements PathFilter {
private final String regex;
public RegexPathFilter(String regex) {
this.regex = regex;
}
@Override
public boolean accept(Path path) {
return path.toString().matches(regex);
}
}
static private MiniDFSCluster dfsCluster;
static private FileSystem fs;
static private FileSystem privilegedFs;
static private FileContext fc;
static private FileContext privilegedFc;
static final private int NUM_OF_PATHS = 4;
static private String USER_DIR;
private final Path[] path = new Path[NUM_OF_PATHS];
@BeforeClass
public static void setUp() throws Exception {
final Configuration conf = new HdfsConfiguration();
dfsCluster = new MiniDFSCluster.Builder(conf).build();
privilegedFs = FileSystem.get(conf);
privilegedFc = FileContext.getFileContext(conf);
    // allow the unprivileged user to create paths
privilegedFs.setPermission(new Path("/"),
FsPermission.createImmutable((short)0777));
UserGroupInformation.setLoginUser(unprivilegedUser);
fs = FileSystem.get(conf);
fc = FileContext.getFileContext(conf);
USER_DIR = fs.getHomeDirectory().toUri().getPath().toString();
}
@AfterClass
public static void tearDown() throws Exception {
if(dfsCluster!=null) {
dfsCluster.shutdown();
}
}
@Test
public void testMultiGlob() throws IOException {
FileStatus[] status;
/*
* /dir1/subdir1
* /dir1/subdir1/f1
* /dir1/subdir1/f2
* /dir1/subdir2/f1
* /dir2/subdir1
* /dir2/subdir2
* /dir2/subdir2/f1
* /dir3/f1
* /dir3/f1
* /dir3/f2(dir)
* /dir3/subdir2(file)
* /dir3/subdir3
* /dir3/subdir3/f1
* /dir3/subdir3/f1/f1
* /dir3/subdir3/f3
* /dir4
*/
Path d1 = new Path(USER_DIR, "dir1");
Path d11 = new Path(d1, "subdir1");
Path d12 = new Path(d1, "subdir2");
Path f111 = new Path(d11, "f1");
fs.createNewFile(f111);
Path f112 = new Path(d11, "f2");
fs.createNewFile(f112);
Path f121 = new Path(d12, "f1");
fs.createNewFile(f121);
Path d2 = new Path(USER_DIR, "dir2");
Path d21 = new Path(d2, "subdir1");
fs.mkdirs(d21);
Path d22 = new Path(d2, "subdir2");
Path f221 = new Path(d22, "f1");
fs.createNewFile(f221);
Path d3 = new Path(USER_DIR, "dir3");
Path f31 = new Path(d3, "f1");
fs.createNewFile(f31);
Path d32 = new Path(d3, "f2");
fs.mkdirs(d32);
Path f32 = new Path(d3, "subdir2"); // fake as a subdir!
fs.createNewFile(f32);
Path d33 = new Path(d3, "subdir3");
Path f333 = new Path(d33, "f3");
fs.createNewFile(f333);
Path d331 = new Path(d33, "f1");
Path f3311 = new Path(d331, "f1");
fs.createNewFile(f3311);
Path d4 = new Path(USER_DIR, "dir4");
fs.mkdirs(d4);
/*
* basic
*/
Path root = new Path(USER_DIR);
status = fs.globStatus(root);
checkStatus(status, root);
status = fs.globStatus(new Path(USER_DIR, "x"));
assertNull(status);
status = fs.globStatus(new Path("x"));
assertNull(status);
status = fs.globStatus(new Path(USER_DIR, "x/x"));
assertNull(status);
status = fs.globStatus(new Path("x/x"));
assertNull(status);
status = fs.globStatus(new Path(USER_DIR, "*"));
checkStatus(status, d1, d2, d3, d4);
status = fs.globStatus(new Path("*"));
checkStatus(status, d1, d2, d3, d4);
status = fs.globStatus(new Path(USER_DIR, "*/x"));
checkStatus(status);
status = fs.globStatus(new Path("*/x"));
checkStatus(status);
status = fs.globStatus(new Path(USER_DIR, "x/*"));
checkStatus(status);
status = fs.globStatus(new Path("x/*"));
checkStatus(status);
// make sure full pattern is scanned instead of bailing early with undef
status = fs.globStatus(new Path(USER_DIR, "x/x/x/*"));
checkStatus(status);
status = fs.globStatus(new Path("x/x/x/*"));
checkStatus(status);
status = fs.globStatus(new Path(USER_DIR, "*/*"));
checkStatus(status, d11, d12, d21, d22, f31, d32, f32, d33);
status = fs.globStatus(new Path("*/*"));
checkStatus(status, d11, d12, d21, d22, f31, d32, f32, d33);
/*
* one level deep
*/
status = fs.globStatus(new Path(USER_DIR, "dir*/*"));
checkStatus(status, d11, d12, d21, d22, f31, d32, f32, d33);
status = fs.globStatus(new Path("dir*/*"));
checkStatus(status, d11, d12, d21, d22, f31, d32, f32, d33);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*"));
checkStatus(status, d11, d12, d21, d22, f32, d33);
status = fs.globStatus(new Path("dir*/subdir*"));
checkStatus(status, d11, d12, d21, d22, f32, d33);
status = fs.globStatus(new Path(USER_DIR, "dir*/f*"));
checkStatus(status, f31, d32);
status = fs.globStatus(new Path("dir*/f*"));
checkStatus(status, f31, d32);
/*
* subdir1 globs
*/
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1"));
checkStatus(status, d11, d21);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/*"));
checkStatus(status, f111, f112);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/*/*"));
checkStatus(status);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/x"));
checkStatus(status);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/x*"));
checkStatus(status);
/*
* subdir2 globs
*/
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir2"));
checkStatus(status, d12, d22, f32);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir2/*"));
checkStatus(status, f121, f221);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir2/*/*"));
checkStatus(status);
/*
* subdir3 globs
*/
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir3"));
checkStatus(status, d33);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir3/*"));
checkStatus(status, d331, f333);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir3/*/*"));
checkStatus(status, f3311);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir3/*/*/*"));
checkStatus(status);
/*
* file1 single dir globs
*/
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f1"));
checkStatus(status, f111);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f1*"));
checkStatus(status, f111);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f1/*"));
checkStatus(status);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f1*/*"));
checkStatus(status);
/*
* file1 multi-dir globs
*/
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1"));
checkStatus(status, f111, f121, f221, d331);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1*"));
checkStatus(status, f111, f121, f221, d331);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1/*"));
checkStatus(status, f3311);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1*/*"));
checkStatus(status, f3311);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1*/*"));
checkStatus(status, f3311);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1*/x"));
checkStatus(status);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1*/*/*"));
checkStatus(status);
/*
* file glob multiple files
*/
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*"));
checkStatus(status, d11, d12, d21, d22, f32, d33);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/*"));
checkStatus(status, f111, f112, f121, f221, d331, f333);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f*"));
checkStatus(status, f111, f112, f121, f221, d331, f333);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f*/*"));
checkStatus(status, f3311);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/*/f1"));
checkStatus(status, f3311);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/*/*"));
checkStatus(status, f3311);
// doesn't exist
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f3"));
checkStatus(status);
status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f3*"));
checkStatus(status);
status = fs.globStatus(new Path("{x}"));
checkStatus(status);
status = fs.globStatus(new Path("{x,y}"));
checkStatus(status);
status = fs.globStatus(new Path("dir*/{x,y}"));
checkStatus(status);
status = fs.globStatus(new Path("dir*/{f1,y}"));
checkStatus(status, f31);
status = fs.globStatus(new Path("{x,y}"));
checkStatus(status);
status = fs.globStatus(new Path("/{x/x,y/y}"));
checkStatus(status);
status = fs.globStatus(new Path("{x/x,y/y}"));
checkStatus(status);
status = fs.globStatus(new Path(Path.CUR_DIR));
checkStatus(status, new Path(USER_DIR));
status = fs.globStatus(new Path(USER_DIR+"{/dir1}"));
checkStatus(status, d1);
status = fs.globStatus(new Path(USER_DIR+"{/dir*}"));
checkStatus(status, d1, d2, d3, d4);
status = fs.globStatus(new Path(Path.SEPARATOR), trueFilter);
checkStatus(status, new Path(Path.SEPARATOR));
status = fs.globStatus(new Path(Path.CUR_DIR), trueFilter);
checkStatus(status, new Path(USER_DIR));
status = fs.globStatus(d1, trueFilter);
checkStatus(status, d1);
status = fs.globStatus(new Path(USER_DIR), trueFilter);
checkStatus(status, new Path(USER_DIR));
status = fs.globStatus(new Path(USER_DIR, "*"), trueFilter);
checkStatus(status, d1, d2, d3, d4);
status = fs.globStatus(new Path("/x/*"), trueFilter);
checkStatus(status);
status = fs.globStatus(new Path("/x"), trueFilter);
assertNull(status);
status = fs.globStatus(new Path("/x/x"), trueFilter);
assertNull(status);
/*
* false filter
*/
PathFilter falseFilter = new PathFilter() {
@Override
public boolean accept(Path path) {
return false;
}
};
status = fs.globStatus(new Path(Path.SEPARATOR), falseFilter);
assertNull(status);
status = fs.globStatus(new Path(Path.CUR_DIR), falseFilter);
assertNull(status);
status = fs.globStatus(new Path(USER_DIR), falseFilter);
assertNull(status);
status = fs.globStatus(new Path(USER_DIR, "*"), falseFilter);
checkStatus(status);
status = fs.globStatus(new Path("/x/*"), falseFilter);
checkStatus(status);
status = fs.globStatus(new Path("/x"), falseFilter);
assertNull(status);
status = fs.globStatus(new Path("/x/x"), falseFilter);
assertNull(status);
cleanupDFS();
}
private void checkStatus(FileStatus[] status, Path ... expectedMatches) {
assertNotNull(status);
String[] paths = new String[status.length];
for (int i=0; i < status.length; i++) {
paths[i] = getPathFromStatus(status[i]);
}
String got = StringUtils.join(paths, "\n");
String expected = StringUtils.join(expectedMatches, "\n");
assertEquals(expected, got);
}
private String getPathFromStatus(FileStatus status) {
return status.getPath().toUri().getPath();
}
@Test
public void testPathFilter() throws IOException {
try {
String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b" };
Path[] matchedPath = prepareTesting(USER_DIR + "/*/*", files,
new RegexPathFilter("^.*" + Pattern.quote(USER_DIR) + "/a/b"));
assertEquals(1, matchedPath.length);
assertEquals(path[1], matchedPath[0]);
} finally {
cleanupDFS();
}
}
@Test
public void testPathFilterWithFixedLastComponent() throws IOException {
try {
String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b",
USER_DIR + "/c", USER_DIR + "/c/b", };
Path[] matchedPath = prepareTesting(USER_DIR + "/*/b", files,
new RegexPathFilter("^.*" + Pattern.quote(USER_DIR) + "/a/b"));
assertEquals(matchedPath.length, 1);
assertEquals(matchedPath[0], path[1]);
} finally {
cleanupDFS();
}
}
@Test
public void pTestLiteral() throws IOException {
try {
String [] files = new String[] {USER_DIR+"/a2c", USER_DIR+"/abc.d"};
Path[] matchedPath = prepareTesting(USER_DIR+"/abc.d", files);
assertEquals(matchedPath.length, 1);
assertEquals(matchedPath[0], path[1]);
} finally {
cleanupDFS();
}
}
@Test
public void pTestEscape() throws IOException {
// Skip the test case on Windows because backslash will be treated as a
// path separator instead of an escaping character on Windows.
org.junit.Assume.assumeTrue(!Path.WINDOWS);
try {
String [] files = new String[] {USER_DIR+"/ab\\[c.d"};
Path[] matchedPath = prepareTesting(USER_DIR+"/ab\\[c.d", files);
assertEquals(matchedPath.length, 1);
assertEquals(matchedPath[0], path[0]);
} finally {
cleanupDFS();
}
}
@Test
public void pTestAny() throws IOException {
try {
String [] files = new String[] { USER_DIR+"/abc", USER_DIR+"/a2c",
USER_DIR+"/a.c", USER_DIR+"/abcd"};
Path[] matchedPath = prepareTesting(USER_DIR+"/a?c", files);
assertEquals(matchedPath.length, 3);
assertEquals(matchedPath[0], path[2]);
assertEquals(matchedPath[1], path[1]);
assertEquals(matchedPath[2], path[0]);
} finally {
cleanupDFS();
}
}
@Test
public void pTestClosure1() throws IOException {
try {
String [] files = new String[] {USER_DIR+"/a", USER_DIR+"/abc",
USER_DIR+"/abc.p", USER_DIR+"/bacd"};
Path[] matchedPath = prepareTesting(USER_DIR+"/a*", files);
assertEquals(matchedPath.length, 3);
assertEquals(matchedPath[0], path[0]);
assertEquals(matchedPath[1], path[1]);
assertEquals(matchedPath[2], path[2]);
} finally {
cleanupDFS();
}
}
@Test
public void pTestClosure2() throws IOException {
try {
String [] files = new String[] {USER_DIR+"/a.", USER_DIR+"/a.txt",
USER_DIR+"/a.old.java", USER_DIR+"/.java"};
Path[] matchedPath = prepareTesting(USER_DIR+"/a.*", files);
assertEquals(matchedPath.length, 3);
assertEquals(matchedPath[0], path[0]);
assertEquals(matchedPath[1], path[2]);
assertEquals(matchedPath[2], path[1]);
} finally {
cleanupDFS();
}
}
@Test
public void pTestClosure3() throws IOException {
try {
String [] files = new String[] {USER_DIR+"/a.txt.x", USER_DIR+"/ax",
USER_DIR+"/ab37x", USER_DIR+"/bacd"};
Path[] matchedPath = prepareTesting(USER_DIR+"/a*x", files);
assertEquals(matchedPath.length, 3);
assertEquals(matchedPath[0], path[0]);
assertEquals(matchedPath[1], path[2]);
assertEquals(matchedPath[2], path[1]);
} finally {
cleanupDFS();
}
}
@Test
public void pTestClosure4() throws IOException {
try {
String [] files = new String[] {USER_DIR+"/dir1/file1",
USER_DIR+"/dir2/file2",
USER_DIR+"/dir3/file1"};
Path[] matchedPath = prepareTesting(USER_DIR+"/*/file1", files);
assertEquals(matchedPath.length, 2);
assertEquals(matchedPath[0], path[0]);
assertEquals(matchedPath[1], path[2]);
} finally {
cleanupDFS();
}
}
@Test
public void pTestClosure5() throws IOException {
try {
String [] files = new String[] {USER_DIR+"/dir1/file1",
USER_DIR+"/file1"};
Path[] matchedPath = prepareTesting(USER_DIR+"/*/file1", files);
assertEquals(matchedPath.length, 1);
assertEquals(matchedPath[0], path[0]);
} finally {
cleanupDFS();
}
}
@Test
public void pTestSet() throws IOException {
try {
String [] files = new String[] {USER_DIR+"/a.c", USER_DIR+"/a.cpp",
USER_DIR+"/a.hlp", USER_DIR+"/a.hxy"};
Path[] matchedPath = prepareTesting(USER_DIR+"/a.[ch]??", files);
assertEquals(matchedPath.length, 3);
assertEquals(matchedPath[0], path[1]);
assertEquals(matchedPath[1], path[2]);
assertEquals(matchedPath[2], path[3]);
} finally {
cleanupDFS();
}
}
@Test
public void pTestRange() throws IOException {
try {
String [] files = new String[] {USER_DIR+"/a.d", USER_DIR+"/a.e",
USER_DIR+"/a.f", USER_DIR+"/a.h"};
Path[] matchedPath = prepareTesting(USER_DIR+"/a.[d-fm]", files);
assertEquals(matchedPath.length, 3);
assertEquals(matchedPath[0], path[0]);
assertEquals(matchedPath[1], path[1]);
assertEquals(matchedPath[2], path[2]);
} finally {
cleanupDFS();
}
}
@Test
public void pTestSetExcl() throws IOException {
try {
String [] files = new String[] {USER_DIR+"/a.d", USER_DIR+"/a.e",
USER_DIR+"/a.0", USER_DIR+"/a.h"};
Path[] matchedPath = prepareTesting(USER_DIR+"/a.[^a-cg-z0-9]", files);
assertEquals(matchedPath.length, 2);
assertEquals(matchedPath[0], path[0]);
assertEquals(matchedPath[1], path[1]);
} finally {
cleanupDFS();
}
}
@Test
public void pTestCombination() throws IOException {
try {
String [] files = new String[] {"/user/aa/a.c", "/user/bb/a.cpp",
"/user1/cc/b.hlp", "/user/dd/a.hxy"};
Path[] matchedPath = prepareTesting("/use?/*/a.[ch]{lp,xy}", files);
assertEquals(matchedPath.length, 1);
assertEquals(matchedPath[0], path[3]);
} finally {
cleanupDFS();
}
}
/* Test {xx,yy} */
@Test
public void pTestCurlyBracket() throws IOException {
Path[] matchedPath;
String [] files;
try {
files = new String[] { USER_DIR+"/a.abcxx", USER_DIR+"/a.abxy",
USER_DIR+"/a.hlp", USER_DIR+"/a.jhyy"};
matchedPath = prepareTesting(USER_DIR+"/a.{abc,jh}??", files);
assertEquals(matchedPath.length, 2);
assertEquals(matchedPath[0], path[0]);
assertEquals(matchedPath[1], path[3]);
} finally {
cleanupDFS();
}
// nested curlies
try {
files = new String[] { USER_DIR+"/a.abcxx", USER_DIR+"/a.abdxy",
USER_DIR+"/a.hlp", USER_DIR+"/a.jhyy" };
matchedPath = prepareTesting(USER_DIR+"/a.{ab{c,d},jh}??", files);
assertEquals(matchedPath.length, 3);
assertEquals(matchedPath[0], path[0]);
assertEquals(matchedPath[1], path[1]);
assertEquals(matchedPath[2], path[3]);
} finally {
cleanupDFS();
}
// cross-component curlies
try {
files = new String[] { USER_DIR+"/a/b", USER_DIR+"/a/d",
USER_DIR+"/c/b", USER_DIR+"/c/d" };
matchedPath = prepareTesting(USER_DIR+"/{a/b,c/d}", files);
assertEquals(matchedPath.length, 2);
assertEquals(matchedPath[0], path[0]);
assertEquals(matchedPath[1], path[3]);
} finally {
cleanupDFS();
}
// cross-component absolute curlies
try {
files = new String[] { "/a/b", "/a/d",
"/c/b", "/c/d" };
matchedPath = prepareTesting("{/a/b,/c/d}", files);
assertEquals(matchedPath.length, 2);
assertEquals(matchedPath[0], path[0]);
assertEquals(matchedPath[1], path[3]);
} finally {
cleanupDFS();
}
try {
// test standalone }
files = new String[] {USER_DIR+"/}bc", USER_DIR+"/}c"};
matchedPath = prepareTesting(USER_DIR+"/}{a,b}c", files);
assertEquals(matchedPath.length, 1);
assertEquals(matchedPath[0], path[0]);
// test {b}
matchedPath = prepareTesting(USER_DIR+"/}{b}c", files);
assertEquals(matchedPath.length, 1);
assertEquals(matchedPath[0], path[0]);
// test {}
matchedPath = prepareTesting(USER_DIR+"/}{}bc", files);
assertEquals(matchedPath.length, 1);
assertEquals(matchedPath[0], path[0]);
// test {,}
matchedPath = prepareTesting(USER_DIR+"/}{,}bc", files);
assertEquals(matchedPath.length, 1);
assertEquals(matchedPath[0], path[0]);
// test {b,}
matchedPath = prepareTesting(USER_DIR+"/}{b,}c", files);
assertEquals(matchedPath.length, 2);
assertEquals(matchedPath[0], path[0]);
assertEquals(matchedPath[1], path[1]);
// test {,b}
matchedPath = prepareTesting(USER_DIR+"/}{,b}c", files);
assertEquals(matchedPath.length, 2);
assertEquals(matchedPath[0], path[0]);
assertEquals(matchedPath[1], path[1]);
// test a combination of {} and ?
matchedPath = prepareTesting(USER_DIR+"/}{ac,?}", files);
assertEquals(matchedPath.length, 1);
assertEquals(matchedPath[0], path[1]);
// test ill-formed curly
boolean hasException = false;
try {
prepareTesting(USER_DIR+"}{bc", files);
} catch (IOException e) {
assertTrue(e.getMessage().startsWith("Illegal file pattern:") );
hasException = true;
}
assertTrue(hasException);
} finally {
cleanupDFS();
}
}
/* test that a path name can contain Java regex special characters */
@Test
public void pTestJavaRegexSpecialChars() throws IOException {
try {
String[] files = new String[] {USER_DIR+"/($.|+)bc", USER_DIR+"/abc"};
Path[] matchedPath = prepareTesting(USER_DIR+"/($.|+)*", files);
assertEquals(matchedPath.length, 1);
assertEquals(matchedPath[0], path[0]);
} finally {
cleanupDFS();
}
}
private Path[] prepareTesting(String pattern, String[] files)
throws IOException {
for(int i=0; i<Math.min(NUM_OF_PATHS, files.length); i++) {
path[i] = fs.makeQualified(new Path(files[i]));
if (!fs.mkdirs(path[i])) {
throw new IOException("Mkdirs failed to create " + path[i].toString());
}
}
Path patternPath = new Path(pattern);
Path[] globResults = FileUtil.stat2Paths(fs.globStatus(patternPath),
patternPath);
for(int i=0; i<globResults.length; i++) {
globResults[i] =
globResults[i].makeQualified(fs.getUri(), fs.getWorkingDirectory());
}
return globResults;
}
private Path[] prepareTesting(String pattern, String[] files,
PathFilter filter) throws IOException {
for(int i=0; i<Math.min(NUM_OF_PATHS, files.length); i++) {
path[i] = fs.makeQualified(new Path(files[i]));
if (!fs.mkdirs(path[i])) {
throw new IOException("Mkdirs failed to create " + path[i].toString());
}
}
Path patternPath = new Path(pattern);
Path[] globResults = FileUtil.stat2Paths(fs.globStatus(patternPath, filter),
patternPath);
for(int i=0; i<globResults.length; i++) {
globResults[i] =
globResults[i].makeQualified(fs.getUri(), fs.getWorkingDirectory());
}
return globResults;
}
private void cleanupDFS() throws IOException {
fs.delete(new Path(USER_DIR), true);
}
/**
* A glob test that can be run on either FileContext or FileSystem.
*/
private abstract class FSTestWrapperGlobTest {
FSTestWrapperGlobTest(boolean useFc) {
if (useFc) {
this.privWrap = new FileContextTestWrapper(privilegedFc);
this.wrap = new FileContextTestWrapper(fc);
} else {
this.privWrap = new FileSystemTestWrapper(privilegedFs);
this.wrap = new FileSystemTestWrapper(fs);
}
}
abstract void run() throws Exception;
final FSTestWrapper privWrap;
final FSTestWrapper wrap;
}
/**
* Run a glob test on FileSystem.
*/
private void testOnFileSystem(FSTestWrapperGlobTest test) throws Exception {
try {
fc.mkdir(new Path(USER_DIR), FsPermission.getDefault(), true);
test.run();
} finally {
fc.delete(new Path(USER_DIR), true);
}
}
/**
* Run a glob test on FileContext.
*/
private void testOnFileContext(FSTestWrapperGlobTest test) throws Exception {
try {
fs.mkdirs(new Path(USER_DIR));
test.run();
} finally {
cleanupDFS();
}
}
/**
* Accept all paths.
*/
private static class AcceptAllPathFilter implements PathFilter {
@Override
public boolean accept(Path path) {
return true;
}
}
private static final PathFilter trueFilter = new AcceptAllPathFilter();
/**
* Accept only paths ending in Z.
*/
private static class AcceptPathsEndingInZ implements PathFilter {
@Override
public boolean accept(Path path) {
String stringPath = path.toUri().getPath();
return stringPath.endsWith("z");
}
}
/**
* Test globbing through symlinks.
*/
private class TestGlobWithSymlinks extends FSTestWrapperGlobTest {
TestGlobWithSymlinks(boolean useFc) {
super(useFc);
}
void run() throws Exception {
// Test that globbing through a symlink to a directory yields a path
// containing that symlink.
wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(),
false);
wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR
+ "/alphaLink"), false);
wrap.mkdir(new Path(USER_DIR + "/alphaLink/beta"),
FsPermission.getDirDefault(), false);
// Test simple glob
FileStatus[] statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/*"),
new AcceptAllPathFilter());
Assert.assertEquals(1, statuses.length);
Assert.assertEquals(USER_DIR + "/alpha/beta", statuses[0].getPath()
.toUri().getPath());
// Test glob through symlink
statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLink/*"),
new AcceptAllPathFilter());
Assert.assertEquals(1, statuses.length);
Assert.assertEquals(USER_DIR + "/alphaLink/beta", statuses[0].getPath()
.toUri().getPath());
// If the terminal path component in a globbed path is a symlink,
// we don't dereference that link.
wrap.createSymlink(new Path("beta"), new Path(USER_DIR
+ "/alphaLink/betaLink"), false);
statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/betaLi*"),
new AcceptAllPathFilter());
Assert.assertEquals(1, statuses.length);
Assert.assertEquals(USER_DIR + "/alpha/betaLink", statuses[0].getPath()
.toUri().getPath());
// todo: test symlink-to-symlink-to-dir, etc.
}
}
@Ignore
@Test
public void testGlobWithSymlinksOnFS() throws Exception {
testOnFileSystem(new TestGlobWithSymlinks(false));
}
@Ignore
@Test
public void testGlobWithSymlinksOnFC() throws Exception {
testOnFileContext(new TestGlobWithSymlinks(true));
}
/**
* Test globbing symlinks to symlinks.
*
* Also test globbing dangling symlinks. It should NOT throw any exceptions!
*/
private class TestGlobWithSymlinksToSymlinks extends
FSTestWrapperGlobTest {
TestGlobWithSymlinksToSymlinks(boolean useFc) {
super(useFc);
}
void run() throws Exception {
// Test that globbing through a symlink to a symlink to a directory
// fully resolves
wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(),
false);
wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR
+ "/alphaLink"), false);
wrap.createSymlink(new Path(USER_DIR + "/alphaLink"), new Path(USER_DIR
+ "/alphaLinkLink"), false);
wrap.mkdir(new Path(USER_DIR + "/alpha/beta"),
FsPermission.getDirDefault(), false);
// Test glob through symlink to a symlink to a directory
FileStatus statuses[] = wrap.globStatus(new Path(USER_DIR
+ "/alphaLinkLink"), new AcceptAllPathFilter());
Assert.assertEquals(1, statuses.length);
Assert.assertEquals(USER_DIR + "/alphaLinkLink", statuses[0].getPath()
.toUri().getPath());
statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLinkLink/*"),
new AcceptAllPathFilter());
Assert.assertEquals(1, statuses.length);
Assert.assertEquals(USER_DIR + "/alphaLinkLink/beta", statuses[0]
.getPath().toUri().getPath());
// Test glob of dangling symlink (theta does not actually exist)
wrap.createSymlink(new Path(USER_DIR + "theta"), new Path(USER_DIR
+ "/alpha/kappa"), false);
statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/kappa/kappa"),
new AcceptAllPathFilter());
Assert.assertNull(statuses);
// Test glob of symlinks
wrap.createFile(USER_DIR + "/alpha/beta/gamma");
wrap.createSymlink(new Path(USER_DIR + "gamma"), new Path(USER_DIR
+ "/alpha/beta/gammaLink"), false);
wrap.createSymlink(new Path(USER_DIR + "gammaLink"), new Path(USER_DIR
+ "/alpha/beta/gammaLinkLink"), false);
wrap.createSymlink(new Path(USER_DIR + "gammaLinkLink"), new Path(
USER_DIR + "/alpha/beta/gammaLinkLinkLink"), false);
statuses = wrap.globStatus(new Path(USER_DIR
+ "/alpha/*/gammaLinkLinkLink"), new AcceptAllPathFilter());
Assert.assertEquals(1, statuses.length);
Assert.assertEquals(USER_DIR + "/alpha/beta/gammaLinkLinkLink",
statuses[0].getPath().toUri().getPath());
statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/beta/*"),
new AcceptAllPathFilter());
Assert.assertEquals(USER_DIR + "/alpha/beta/gamma;" + USER_DIR
+ "/alpha/beta/gammaLink;" + USER_DIR + "/alpha/beta/gammaLinkLink;"
+ USER_DIR + "/alpha/beta/gammaLinkLinkLink",
TestPath.mergeStatuses(statuses));
// Let's create two symlinks that point to each other, and glob on them.
wrap.createSymlink(new Path(USER_DIR + "tweedledee"), new Path(USER_DIR
+ "/tweedledum"), false);
wrap.createSymlink(new Path(USER_DIR + "tweedledum"), new Path(USER_DIR
+ "/tweedledee"), false);
statuses = wrap.globStatus(
new Path(USER_DIR + "/tweedledee/unobtainium"),
new AcceptAllPathFilter());
Assert.assertNull(statuses);
}
}
@Ignore
@Test
public void testGlobWithSymlinksToSymlinksOnFS() throws Exception {
testOnFileSystem(new TestGlobWithSymlinksToSymlinks(false));
}
@Ignore
@Test
public void testGlobWithSymlinksToSymlinksOnFC() throws Exception {
testOnFileContext(new TestGlobWithSymlinksToSymlinks(true));
}
/**
* Test globbing symlinks with a custom PathFilter
*/
private class TestGlobSymlinksWithCustomPathFilter extends
FSTestWrapperGlobTest {
TestGlobSymlinksWithCustomPathFilter(boolean useFc) {
super(useFc);
}
void run() throws Exception {
// Test that globbing through a symlink to a symlink to a directory
// fully resolves
wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(),
false);
wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR
+ "/alphaLinkz"), false);
wrap.mkdir(new Path(USER_DIR + "/alpha/beta"),
FsPermission.getDirDefault(), false);
wrap.mkdir(new Path(USER_DIR + "/alpha/betaz"),
FsPermission.getDirDefault(), false);
// Test glob through symlink to a symlink to a directory, with a
// PathFilter
FileStatus statuses[] = wrap.globStatus(
new Path(USER_DIR + "/alpha/beta"), new AcceptPathsEndingInZ());
Assert.assertNull(statuses);
statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLinkz/betaz"),
new AcceptPathsEndingInZ());
Assert.assertEquals(1, statuses.length);
Assert.assertEquals(USER_DIR + "/alphaLinkz/betaz", statuses[0].getPath()
.toUri().getPath());
statuses = wrap.globStatus(new Path(USER_DIR + "/*/*"),
new AcceptPathsEndingInZ());
Assert.assertEquals(USER_DIR + "/alpha/betaz;" + USER_DIR
+ "/alphaLinkz/betaz", TestPath.mergeStatuses(statuses));
statuses = wrap.globStatus(new Path(USER_DIR + "/*/*"),
new AcceptAllPathFilter());
Assert.assertEquals(USER_DIR + "/alpha/beta;" + USER_DIR
+ "/alpha/betaz;" + USER_DIR + "/alphaLinkz/beta;" + USER_DIR
+ "/alphaLinkz/betaz", TestPath.mergeStatuses(statuses));
}
}
@Ignore
@Test
public void testGlobSymlinksWithCustomPathFilterOnFS() throws Exception {
testOnFileSystem(new TestGlobSymlinksWithCustomPathFilter(false));
}
@Ignore
@Test
public void testGlobSymlinksWithCustomPathFilterOnFC() throws Exception {
testOnFileContext(new TestGlobSymlinksWithCustomPathFilter(true));
}
/**
* Test that globStatus fills in the scheme even when it is not provided.
*/
private class TestGlobFillsInScheme extends FSTestWrapperGlobTest {
TestGlobFillsInScheme(boolean useFc) {
super(useFc);
}
void run() throws Exception {
// Verify that the default scheme is hdfs, when we don't supply one.
wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(),
false);
wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR
+ "/alphaLink"), false);
FileStatus statuses[] = wrap.globStatus(
new Path(USER_DIR + "/alphaLink"), new AcceptAllPathFilter());
Assert.assertEquals(1, statuses.length);
Path path = statuses[0].getPath();
Assert.assertEquals(USER_DIR + "/alpha", path.toUri().getPath());
Assert.assertEquals("hdfs", path.toUri().getScheme());
// FileContext can list a file:/// URI.
// Since everyone should have the root directory, we list that.
statuses = fc.util().globStatus(new Path("file:///"),
new AcceptAllPathFilter());
Assert.assertEquals(1, statuses.length);
Path filePath = statuses[0].getPath();
Assert.assertEquals("file", filePath.toUri().getScheme());
Assert.assertEquals("/", filePath.toUri().getPath());
// The FileSystem should have scheme 'hdfs'
Assert.assertEquals("hdfs", fs.getScheme());
}
}
@Test
public void testGlobFillsInSchemeOnFS() throws Exception {
testOnFileSystem(new TestGlobFillsInScheme(false));
}
@Test
public void testGlobFillsInSchemeOnFC() throws Exception {
testOnFileContext(new TestGlobFillsInScheme(true));
}
/**
* Test that globStatus works with relative paths.
**/
private class TestRelativePath extends FSTestWrapperGlobTest {
TestRelativePath(boolean useFc) {
super(useFc);
}
void run() throws Exception {
String[] files = new String[] { "a", "abc", "abc.p", "bacd" };
Path[] path = new Path[files.length];
for(int i=0; i < files.length; i++) {
path[i] = wrap.makeQualified(new Path(files[i]));
wrap.mkdir(path[i], FsPermission.getDirDefault(), true);
}
Path patternPath = new Path("a*");
Path[] globResults = FileUtil.stat2Paths(wrap.globStatus(patternPath,
new AcceptAllPathFilter()),
patternPath);
for(int i=0; i < globResults.length; i++) {
globResults[i] = wrap.makeQualified(globResults[i]);
}
assertEquals(globResults.length, 3);
// The default working directory for FileSystem is the user's home
// directory. For FileContext, the default is based on the UNIX user that
// started the jvm. This is arguably a bug (see HADOOP-10944 for
// details). We work around it here by explicitly calling
// getWorkingDirectory and going from there.
String pwd = wrap.getWorkingDirectory().toUri().getPath();
assertEquals(pwd + "/a;" + pwd + "/abc;" + pwd + "/abc.p",
TestPath.mergeStatuses(globResults));
}
}
@Test
public void testRelativePathOnFS() throws Exception {
testOnFileSystem(new TestRelativePath(false));
}
@Test
public void testRelativePathOnFC() throws Exception {
testOnFileContext(new TestRelativePath(true));
}
/**
* Test that trying to glob through a directory we don't have permission
* to list fails with AccessControlException rather than succeeding or
* throwing any other exception.
**/
private class TestGlobAccessDenied extends FSTestWrapperGlobTest {
TestGlobAccessDenied(boolean useFc) {
super(useFc);
}
void run() throws Exception {
privWrap.mkdir(new Path("/nopermission/val"),
new FsPermission((short)0777), true);
privWrap.mkdir(new Path("/norestrictions/val"),
new FsPermission((short)0777), true);
privWrap.setPermission(new Path("/nopermission"),
new FsPermission((short)0));
try {
wrap.globStatus(new Path("/no*/*"),
new AcceptAllPathFilter());
Assert.fail("expected to get an AccessControlException when " +
"globbing through a directory we don't have permissions " +
"to list.");
} catch (AccessControlException ioe) {
}
Assert.assertEquals("/norestrictions/val",
TestPath.mergeStatuses(wrap.globStatus(
new Path("/norestrictions/*"),
new AcceptAllPathFilter())));
}
}
@Test
public void testGlobAccessDeniedOnFS() throws Exception {
testOnFileSystem(new TestGlobAccessDenied(false));
}
@Test
public void testGlobAccessDeniedOnFC() throws Exception {
testOnFileContext(new TestGlobAccessDenied(true));
}
/**
* Test that trying to list a reserved path on HDFS via the globber works.
**/
private class TestReservedHdfsPaths extends FSTestWrapperGlobTest {
TestReservedHdfsPaths(boolean useFc) {
super(useFc);
}
void run() throws Exception {
String reservedRoot = "/.reserved/.inodes/" + INodeId.ROOT_INODE_ID;
Assert.assertEquals(reservedRoot,
TestPath.mergeStatuses(wrap.
globStatus(new Path(reservedRoot), new AcceptAllPathFilter())));
// These inodes don't show up via listStatus.
Assert.assertEquals("",
TestPath.mergeStatuses(wrap.
globStatus(new Path("/.reserved/*"), new AcceptAllPathFilter())));
}
}
@Test
public void testReservedHdfsPathsOnFS() throws Exception {
testOnFileSystem(new TestReservedHdfsPaths(false));
}
@Test
public void testReservedHdfsPathsOnFC() throws Exception {
testOnFileContext(new TestReservedHdfsPaths(true));
}
/**
* Test trying to glob the root. Regression test for HDFS-5888.
**/
private class TestGlobRoot extends FSTestWrapperGlobTest {
TestGlobRoot (boolean useFc) {
super(useFc);
}
void run() throws Exception {
final Path rootPath = new Path("/");
FileStatus oldRootStatus = wrap.getFileStatus(rootPath);
String newOwner = UUID.randomUUID().toString();
privWrap.setOwner(new Path("/"), newOwner, null);
FileStatus[] status =
wrap.globStatus(rootPath, new AcceptAllPathFilter());
Assert.assertEquals(1, status.length);
Assert.assertEquals(newOwner, status[0].getOwner());
privWrap.setOwner(new Path("/"), oldRootStatus.getOwner(), null);
}
}
@Test
public void testGlobRootOnFS() throws Exception {
testOnFileSystem(new TestGlobRoot(false));
}
@Test
public void testGlobRootOnFC() throws Exception {
testOnFileContext(new TestGlobRoot(true));
}
/**
* Test glob expressions that don't appear at the end of the path. Regression
* test for HADOOP-10957.
**/
private class TestNonTerminalGlobs extends FSTestWrapperGlobTest {
TestNonTerminalGlobs(boolean useFc) {
super(useFc);
}
void run() throws Exception {
try {
privWrap.mkdir(new Path("/filed_away/alpha"),
new FsPermission((short)0777), true);
privWrap.createFile(new Path("/filed"), 0);
FileStatus[] statuses =
wrap.globStatus(new Path("/filed*/alpha"),
new AcceptAllPathFilter());
Assert.assertEquals(1, statuses.length);
Assert.assertEquals("/filed_away/alpha", statuses[0].getPath()
.toUri().getPath());
privWrap.mkdir(new Path("/filed_away/alphabet"),
new FsPermission((short)0777), true);
privWrap.mkdir(new Path("/filed_away/alphabet/abc"),
new FsPermission((short)0777), true);
statuses = wrap.globStatus(new Path("/filed*/alph*/*b*"),
new AcceptAllPathFilter());
Assert.assertEquals(1, statuses.length);
Assert.assertEquals("/filed_away/alphabet/abc", statuses[0].getPath()
.toUri().getPath());
} finally {
privWrap.delete(new Path("/filed"), true);
privWrap.delete(new Path("/filed_away"), true);
}
}
}
@Test
public void testNonTerminalGlobsOnFS() throws Exception {
testOnFileSystem(new TestNonTerminalGlobs(false));
}
@Test
public void testNonTerminalGlobsOnFC() throws Exception {
testOnFileContext(new TestNonTerminalGlobs(true));
}
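  /**
   * Test that globStatus on the local filesystem returns its results in
   * sorted order, regardless of the order in which the directories were
   * created.
   */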
@Test
public void testLocalFilesystem() throws Exception {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
String localTmp = System.getProperty("java.io.tmpdir");
Path base = new Path(new Path(localTmp), UUID.randomUUID().toString());
Assert.assertTrue(fs.mkdirs(base));
Assert.assertTrue(fs.mkdirs(new Path(base, "e")));
Assert.assertTrue(fs.mkdirs(new Path(base, "c")));
Assert.assertTrue(fs.mkdirs(new Path(base, "a")));
Assert.assertTrue(fs.mkdirs(new Path(base, "d")));
Assert.assertTrue(fs.mkdirs(new Path(base, "b")));
fs.deleteOnExit(base);
FileStatus[] status = fs.globStatus(new Path(base, "*"));
    ArrayList<String> list = new ArrayList<String>();
for (FileStatus f: status) {
list.add(f.getPath().toString());
}
boolean sorted = Ordering.natural().isOrdered(list);
Assert.assertTrue(sorted);
}
}
| 44,107 | 32.593298 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotSame;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Tests for <code>XAttr</code> objects.
*/
public class TestXAttr {
private static XAttr XATTR, XATTR1, XATTR2, XATTR3, XATTR4, XATTR5;
@BeforeClass
public static void setUp() throws Exception {
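    // Build XAttrs that share the same name and value but differ only in
    // namespace. XATTR is built without an explicit namespace and is expected
    // to default to USER, as the equality assertions below rely on.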
byte[] value = {0x31, 0x32, 0x33};
XATTR = new XAttr.Builder()
.setName("name")
.setValue(value)
.build();
XATTR1 = new XAttr.Builder()
.setNameSpace(XAttr.NameSpace.USER)
.setName("name")
.setValue(value)
.build();
XATTR2 = new XAttr.Builder()
.setNameSpace(XAttr.NameSpace.TRUSTED)
.setName("name")
.setValue(value)
.build();
XATTR3 = new XAttr.Builder()
.setNameSpace(XAttr.NameSpace.SYSTEM)
.setName("name")
.setValue(value)
.build();
XATTR4 = new XAttr.Builder()
.setNameSpace(XAttr.NameSpace.SECURITY)
.setName("name")
.setValue(value)
.build();
XATTR5 = new XAttr.Builder()
.setNameSpace(XAttr.NameSpace.RAW)
.setName("name")
.setValue(value)
.build();
}
@Test
public void testXAttrEquals() {
assertNotSame(XATTR1, XATTR2);
assertNotSame(XATTR2, XATTR3);
assertNotSame(XATTR3, XATTR4);
assertNotSame(XATTR4, XATTR5);
assertEquals(XATTR, XATTR1);
assertEquals(XATTR1, XATTR1);
assertEquals(XATTR2, XATTR2);
assertEquals(XATTR3, XATTR3);
assertEquals(XATTR4, XATTR4);
assertEquals(XATTR5, XATTR5);
assertFalse(XATTR1.equals(XATTR2));
assertFalse(XATTR2.equals(XATTR3));
assertFalse(XATTR3.equals(XATTR4));
assertFalse(XATTR4.equals(XATTR5));
}
@Test
public void testXAttrHashCode() {
assertEquals(XATTR.hashCode(), XATTR1.hashCode());
assertFalse(XATTR1.hashCode() == XATTR2.hashCode());
assertFalse(XATTR2.hashCode() == XATTR3.hashCode());
assertFalse(XATTR3.hashCode() == XATTR4.hashCode());
assertFalse(XATTR4.hashCode() == XATTR5.hashCode());
}
}
| 2,957 | 30.136842 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.PathUtils;
import org.junit.Test;
/**
* Test of the URL stream handler.
*/
public class TestUrlStreamHandler {
private static final File TEST_ROOT_DIR = PathUtils.getTestDir(TestUrlStreamHandler.class);
/**
* Test opening and reading from an InputStream through a hdfs:// URL.
* <p>
* First generate a file with some content through the FileSystem API, then
* try to open and read the file through the URL stream API.
*
* @throws IOException
*/
@Test
public void testDfsUrls() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
    // Set up our own factory.
    // setURLStreamHandlerFactory can be called at most once per JVM, so
    // the new URLStreamHandler is valid for all test cases
    // in TestUrlStreamHandler.
FsUrlStreamHandlerFactory factory =
new org.apache.hadoop.fs.FsUrlStreamHandlerFactory();
java.net.URL.setURLStreamHandlerFactory(factory);
Path filePath = new Path("/thefile");
try {
byte[] fileContent = new byte[1024];
for (int i = 0; i < fileContent.length; ++i)
fileContent[i] = (byte) i;
// First create the file through the FileSystem API
OutputStream os = fs.create(filePath);
os.write(fileContent);
os.close();
// Second, open and read the file content through the URL API
URI uri = fs.getUri();
URL fileURL =
new URL(uri.getScheme(), uri.getHost(), uri.getPort(), filePath
.toString());
InputStream is = fileURL.openStream();
assertNotNull(is);
byte[] bytes = new byte[4096];
assertEquals(1024, is.read(bytes));
is.close();
for (int i = 0; i < fileContent.length; ++i)
assertEquals(fileContent[i], bytes[i]);
// Cleanup: delete the file
fs.delete(filePath, false);
} finally {
fs.close();
cluster.shutdown();
}
}
/**
* Test opening and reading from an InputStream through a file:// URL.
*
* @throws IOException
* @throws URISyntaxException
*/
@Test
public void testFileUrls() throws IOException, URISyntaxException {
// URLStreamHandler is already set in JVM by testDfsUrls()
Configuration conf = new HdfsConfiguration();
    // Create the test temporary directory if it does not exist.
if (!TEST_ROOT_DIR.exists()) {
if (!TEST_ROOT_DIR.mkdirs())
throw new IOException("Cannot create temporary directory: " + TEST_ROOT_DIR);
}
File tmpFile = new File(TEST_ROOT_DIR, "thefile");
URI uri = tmpFile.toURI();
FileSystem fs = FileSystem.get(uri, conf);
try {
byte[] fileContent = new byte[1024];
for (int i = 0; i < fileContent.length; ++i)
fileContent[i] = (byte) i;
// First create the file through the FileSystem API
OutputStream os = fs.create(new Path(uri.getPath()));
os.write(fileContent);
os.close();
// Second, open and read the file content through the URL API.
URL fileURL = uri.toURL();
InputStream is = fileURL.openStream();
assertNotNull(is);
byte[] bytes = new byte[4096];
assertEquals(1024, is.read(bytes));
is.close();
for (int i = 0; i < fileContent.length; ++i)
assertEquals(fileContent[i], bytes[i]);
// Cleanup: delete the file
fs.delete(new Path(uri.getPath()), false);
} finally {
fs.close();
}
}
}
| 4,810 | 29.06875 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.net.URISyntaxException;
import javax.security.auth.login.LoginException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
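/**
 * Test FileContext create and mkdir operations against HDFS, reusing the
 * shared FileContextCreateMkdirBaseTest suite.
 */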
public class TestFcHdfsCreateMkdir extends
FileContextCreateMkdirBaseTest {
private static MiniDFSCluster cluster;
private static Path defaultWorkingDirectory;
@Override
protected FileContextTestHelper createFileContextHelper() {
return new FileContextTestHelper("/tmp/TestFcHdfsCreateMkdir");
}
@BeforeClass
public static void clusterSetupAtBegining()
throws IOException, LoginException, URISyntaxException {
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
fc = FileContext.getFileContext(cluster.getURI(0), conf);
defaultWorkingDirectory = fc.makeQualified( new Path("/user/" +
UserGroupInformation.getCurrentUser().getShortUserName()));
fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
cluster.shutdown();
}
@Override
@Before
public void setUp() throws Exception {
super.setUp();
}
@Override
@After
public void tearDown() throws Exception {
super.tearDown();
}
}
| 2,449 | 31.236842 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import javax.security.auth.login.LoginException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.EnumSet;
import static org.apache.hadoop.fs.CreateFlag.CREATE;
import static org.apache.hadoop.fs.FileContextTestHelper.getDefaultBlockSize;
import static org.apache.hadoop.fs.FileContextTestHelper.getFileData;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertTrue;
/**
* Test of FileContext apis on Webhdfs.
*/
public class TestWebHdfsFileContextMainOperations
extends FileContextMainOperationsBaseTest {
protected static MiniDFSCluster cluster;
private static Path defaultWorkingDirectory;
protected static URI webhdfsUrl;
protected static int numBlocks = 2;
protected static final byte[] data = getFileData(numBlocks,
getDefaultBlockSize());
protected static final HdfsConfiguration CONF = new HdfsConfiguration();
@Override
public Path getDefaultWorkingDirectory() {
return defaultWorkingDirectory;
}
public URI getWebhdfsUrl() {
return webhdfsUrl;
}
@BeforeClass
public static void clusterSetupAtBeginning()
throws IOException, LoginException, URISyntaxException {
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
cluster.waitClusterUp();
webhdfsUrl = new URI(WebHdfs.SCHEME + "://" + cluster.getConfiguration(0)
.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
fc = FileContext.getFileContext(webhdfsUrl, CONF);
defaultWorkingDirectory = fc.makeQualified(new Path(
"/user/" + UserGroupInformation.getCurrentUser().getShortUserName()));
fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
@Before
public void setUp() throws Exception {
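    // Point the test root at a fresh, randomly named directory under the
    // webhdfs URI so each run gets its own working area.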
URI webhdfsUrlReal = getWebhdfsUrl();
Path testBuildData = new Path(
webhdfsUrlReal + "/build/test/data/" + RandomStringUtils
.randomAlphanumeric(10));
Path rootPath = new Path(testBuildData, "root-uri");
localFsRootPath = rootPath.makeQualified(webhdfsUrlReal, null);
fc.mkdir(getTestRootPath(fc, "test"), FileContext.DEFAULT_PERM, true);
}
private Path getTestRootPath(FileContext fc, String path) {
return fileContextTestHelper.getTestRootPath(fc, path);
}
@Override
protected boolean listCorruptedBlocksSupported() {
return false;
}
/**
   * Test FileContext APIs when symlinks are not supported.
   * TODO: Open a separate JIRA for full symlink support in webhdfs.
*/
@Test
public void testUnsupportedSymlink() throws IOException {
/**
     * The WebHdfs client partially supports symlinks: creating a symlink is
     * supported, but the getLinkTargetPath() API is not supported currently.
     * Implement the test case once full support is available.
*/
}
/**
* TODO: Open JIRA for the idiosyncrasies between hdfs and webhdfs
*/
public void testSetVerifyChecksum() throws IOException {
final Path rootPath = getTestRootPath(fc, "test");
final Path path = new Path(rootPath, "zoo");
FSDataOutputStream out = fc
.create(path, EnumSet.of(CREATE), Options.CreateOpts.createParent());
try {
out.write(data, 0, data.length);
} finally {
out.close();
}
    // In the webhdfs scheme, fc.setVerifyChecksum() can be called only after
    // writing the first few bytes, whereas in the hdfs scheme it can be
    // called immediately after the create call.
    // Instruct the FS to verify checksums through the FileContext:
fc.setVerifyChecksum(true, path);
FileStatus fileStatus = fc.getFileStatus(path);
final long len = fileStatus.getLen();
assertTrue(len == data.length);
byte[] bb = new byte[(int) len];
FSDataInputStream fsdis = fc.open(path);
try {
fsdis.readFully(bb);
} finally {
fsdis.close();
}
assertArrayEquals(data, bb);
}
@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
}
| 5,251 | 32.240506 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.net.URISyntaxException;
import javax.security.auth.login.LoginException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import static org.apache.hadoop.fs.FileContextTestHelper.*;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestFcHdfsSetUMask {
private static final FileContextTestHelper fileContextTestHelper =
new FileContextTestHelper("/tmp/TestFcHdfsSetUMask");
private static MiniDFSCluster cluster;
private static Path defaultWorkingDirectory;
private static FileContext fc;
// rwxrwx---
private static final FsPermission USER_GROUP_OPEN_PERMISSIONS = FsPermission
.createImmutable((short) 0770);
private static final FsPermission USER_GROUP_OPEN_FILE_PERMISSIONS =
FsPermission.createImmutable((short) 0660);
private static final FsPermission USER_GROUP_OPEN_TEST_UMASK = FsPermission
.createImmutable((short) (0770 ^ 0777));
// ---------
private static final FsPermission BLANK_PERMISSIONS = FsPermission
.createImmutable((short) 0000);
// parent directory permissions when creating a directory with blank (000)
  // permissions - it always adds the -wx------ bits to the parent so that
// it can create the child
private static final FsPermission PARENT_PERMS_FOR_BLANK_PERMISSIONS =
FsPermission.createImmutable((short) 0300);
private static final FsPermission BLANK_TEST_UMASK = FsPermission
.createImmutable((short) (0000 ^ 0777));
// rwxrwxrwx
private static final FsPermission WIDE_OPEN_PERMISSIONS = FsPermission
.createImmutable((short) 0777);
private static final FsPermission WIDE_OPEN_FILE_PERMISSIONS =
FsPermission.createImmutable((short) 0666);
private static final FsPermission WIDE_OPEN_TEST_UMASK = FsPermission
.createImmutable((short) (0777 ^ 0777));
@BeforeClass
public static void clusterSetupAtBegining()
throws IOException, LoginException, URISyntaxException {
Configuration conf = new HdfsConfiguration();
    // set a very restrictive default umask (077)
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
fc = FileContext.getFileContext(cluster.getURI(0), conf);
defaultWorkingDirectory = fc.makeQualified( new Path("/user/" +
UserGroupInformation.getCurrentUser().getShortUserName()));
fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
cluster.shutdown();
}
{
try {
((Log4JLogger)FileSystem.LOG).getLogger().setLevel(Level.DEBUG);
}
catch(Exception e) {
System.out.println("Cannot change log level\n"
+ StringUtils.stringifyException(e));
}
}
@Before
public void setUp() throws Exception {
fc.setUMask(WIDE_OPEN_TEST_UMASK);
fc.mkdir(fileContextTestHelper.getTestRootPath(fc), FileContext.DEFAULT_PERM, true);
}
@After
public void tearDown() throws Exception {
fc.delete(fileContextTestHelper.getTestRootPath(fc), true);
}
@Test
public void testMkdirWithExistingDirClear() throws IOException {
testMkdirWithExistingDir(BLANK_TEST_UMASK, BLANK_PERMISSIONS);
}
@Test
public void testMkdirWithExistingDirOpen() throws IOException {
testMkdirWithExistingDir(WIDE_OPEN_TEST_UMASK, WIDE_OPEN_PERMISSIONS);
}
@Test
public void testMkdirWithExistingDirMiddle() throws IOException {
testMkdirWithExistingDir(USER_GROUP_OPEN_TEST_UMASK,
USER_GROUP_OPEN_PERMISSIONS);
}
@Test
public void testMkdirRecursiveWithNonExistingDirClear() throws IOException {
// by default parent directories have -wx------ bits set
testMkdirRecursiveWithNonExistingDir(BLANK_TEST_UMASK, BLANK_PERMISSIONS,
PARENT_PERMS_FOR_BLANK_PERMISSIONS);
}
@Test
public void testMkdirRecursiveWithNonExistingDirOpen() throws IOException {
testMkdirRecursiveWithNonExistingDir(WIDE_OPEN_TEST_UMASK,
WIDE_OPEN_PERMISSIONS, WIDE_OPEN_PERMISSIONS);
}
@Test
public void testMkdirRecursiveWithNonExistingDirMiddle() throws IOException {
testMkdirRecursiveWithNonExistingDir(USER_GROUP_OPEN_TEST_UMASK,
USER_GROUP_OPEN_PERMISSIONS, USER_GROUP_OPEN_PERMISSIONS);
}
@Test
public void testCreateRecursiveWithExistingDirClear() throws IOException {
testCreateRecursiveWithExistingDir(BLANK_TEST_UMASK, BLANK_PERMISSIONS);
}
@Test
public void testCreateRecursiveWithExistingDirOpen() throws IOException {
testCreateRecursiveWithExistingDir(WIDE_OPEN_TEST_UMASK,
WIDE_OPEN_FILE_PERMISSIONS);
}
@Test
public void testCreateRecursiveWithExistingDirMiddle() throws IOException {
testCreateRecursiveWithExistingDir(USER_GROUP_OPEN_TEST_UMASK,
USER_GROUP_OPEN_FILE_PERMISSIONS);
}
@Test
public void testCreateRecursiveWithNonExistingDirClear() throws IOException {
    // Directory permissions are inherited from the parent, so this must match
    // the umask set in @Before.
testCreateRecursiveWithNonExistingDir(BLANK_TEST_UMASK,
WIDE_OPEN_PERMISSIONS, BLANK_PERMISSIONS);
}
@Test
public void testCreateRecursiveWithNonExistingDirOpen() throws IOException {
    // Directory permissions are inherited from the parent, so this must match
    // the umask set in @Before.
testCreateRecursiveWithNonExistingDir(WIDE_OPEN_TEST_UMASK,
WIDE_OPEN_PERMISSIONS, WIDE_OPEN_FILE_PERMISSIONS);
}
@Test
public void testCreateRecursiveWithNonExistingDirMiddle() throws IOException {
    // Directory permissions are inherited from the parent, so this must match
    // the umask set in @Before.
testCreateRecursiveWithNonExistingDir(USER_GROUP_OPEN_TEST_UMASK,
WIDE_OPEN_PERMISSIONS, USER_GROUP_OPEN_FILE_PERMISSIONS);
}
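  /**
   * Set the umask, create a directory under an existing parent, and verify
   * the resulting permissions.
   */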
public void testMkdirWithExistingDir(FsPermission umask,
FsPermission expectedPerms) throws IOException {
Path f = fileContextTestHelper.getTestRootPath(fc, "aDir");
fc.setUMask(umask);
fc.mkdir(f, FileContext.DEFAULT_PERM, true);
Assert.assertTrue(isDir(fc, f));
Assert.assertEquals("permissions on directory are wrong",
expectedPerms, fc.getFileStatus(f).getPermission());
}
public void testMkdirRecursiveWithNonExistingDir(FsPermission umask,
FsPermission expectedPerms, FsPermission expectedParentPerms)
throws IOException {
Path f = fileContextTestHelper.getTestRootPath(fc, "NonExistant2/aDir");
fc.setUMask(umask);
fc.mkdir(f, FileContext.DEFAULT_PERM, true);
Assert.assertTrue(isDir(fc, f));
Assert.assertEquals("permissions on directory are wrong",
expectedPerms, fc.getFileStatus(f).getPermission());
Path fParent = fileContextTestHelper.getTestRootPath(fc, "NonExistant2");
Assert.assertEquals("permissions on parent directory are wrong",
expectedParentPerms, fc.getFileStatus(fParent).getPermission());
}
public void testCreateRecursiveWithExistingDir(FsPermission umask,
FsPermission expectedPerms) throws IOException {
Path f = fileContextTestHelper.getTestRootPath(fc,"foo");
fc.setUMask(umask);
createFile(fc, f);
Assert.assertTrue(isFile(fc, f));
Assert.assertEquals("permissions on file are wrong",
expectedPerms , fc.getFileStatus(f).getPermission());
}
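  /**
   * Set the umask, create a file whose parent does not yet exist, and verify
   * the permissions of both the file and the implicitly created parent.
   */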
public void testCreateRecursiveWithNonExistingDir(FsPermission umask,
FsPermission expectedDirPerms, FsPermission expectedFilePerms)
throws IOException {
Path f = fileContextTestHelper.getTestRootPath(fc,"NonExisting/foo");
Path fParent = fileContextTestHelper.getTestRootPath(fc, "NonExisting");
Assert.assertFalse(exists(fc, fParent));
fc.setUMask(umask);
createFile(fc, f);
Assert.assertTrue(isFile(fc, f));
Assert.assertEquals("permissions on file are wrong",
expectedFilePerms, fc.getFileStatus(f).getPermission());
Assert.assertEquals("permissions on parent directory are wrong",
expectedDirPerms, fc.getFileStatus(fParent).getPermission());
}
}
| 9,422 | 36.098425 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.File;
import static org.junit.Assert.fail;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Tests whether FileContext can resolve an hdfs path that has a symlink to
* local file system. Also tests getDelegationTokens API in file context with
* underlying file system as Hdfs.
*/
public class TestResolveHdfsSymlink {
private static final FileContextTestHelper helper = new FileContextTestHelper();
private static MiniDFSCluster cluster = null;
@BeforeClass
public static void setUp() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
}
@AfterClass
public static void tearDown() {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Tests resolution of an hdfs symlink to the local file system.
* @throws IOException
* @throws InterruptedException
*/
@Test
public void testFcResolveAfs() throws IOException, InterruptedException {
Configuration conf = new Configuration();
FileContext fcLocal = FileContext.getLocalFSFileContext();
FileContext fcHdfs = FileContext.getFileContext(cluster.getFileSystem()
.getUri());
final String localTestRoot = helper.getAbsoluteTestRootDir(fcLocal);
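    // Create a file on the local filesystem, then create an HDFS symlink that
    // points at the local test root containing it.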
Path alphaLocalPath = new Path(fcLocal.getDefaultFileSystem().getUri()
.toString(), new File(localTestRoot, "alpha").getAbsolutePath());
DFSTestUtil.createFile(FileSystem.getLocal(conf), alphaLocalPath, 16,
(short) 1, 2);
Path linkTarget = new Path(fcLocal.getDefaultFileSystem().getUri()
.toString(), localTestRoot);
Path hdfsLink = new Path(fcHdfs.getDefaultFileSystem().getUri().toString(),
"/tmp/link");
fcHdfs.createSymlink(linkTarget, hdfsLink, true);
Path alphaHdfsPathViaLink = new Path(fcHdfs.getDefaultFileSystem().getUri()
.toString()
+ "/tmp/link/alpha");
Set<AbstractFileSystem> afsList = fcHdfs
.resolveAbstractFileSystems(alphaHdfsPathViaLink);
Assert.assertEquals(2, afsList.size());
for (AbstractFileSystem afs : afsList) {
if ((!afs.equals(fcHdfs.getDefaultFileSystem()))
&& (!afs.equals(fcLocal.getDefaultFileSystem()))) {
Assert.fail("Failed to resolve AFS correctly");
}
}
}
/**
   * Tests delegation token APIs in FileContext for Hdfs, and the renew and
   * cancel APIs in Hdfs.
*
* @throws UnsupportedFileSystemException
* @throws IOException
* @throws InterruptedException
*/
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testFcDelegationToken() throws UnsupportedFileSystemException,
IOException, InterruptedException {
FileContext fcHdfs = FileContext.getFileContext(cluster.getFileSystem()
.getUri());
final AbstractFileSystem afs = fcHdfs.getDefaultFileSystem();
final List<Token<?>> tokenList =
afs.getDelegationTokens(UserGroupInformation.getCurrentUser()
.getUserName());
((Hdfs) afs).renewDelegationToken((Token<DelegationTokenIdentifier>) tokenList
.get(0));
((Hdfs) afs).cancelDelegationToken(
(Token<? extends AbstractDelegationTokenIdentifier>) tokenList.get(0));
}
/**
* Verifies that attempting to resolve a non-symlink results in client
* exception
*/
@Test
public void testLinkTargetNonSymlink() throws UnsupportedFileSystemException,
IOException {
FileContext fc = null;
Path notSymlink = new Path("/notasymlink");
try {
fc = FileContext.getFileContext(cluster.getFileSystem().getUri());
fc.create(notSymlink, EnumSet.of(CreateFlag.CREATE));
DFSClient client = new DFSClient(cluster.getFileSystem().getUri(),
cluster.getConfiguration(0));
try {
client.getLinkTarget(notSymlink.toString());
fail("Expected exception for resolving non-symlink");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("is not a symbolic link", e);
}
} finally {
if (fc != null) {
fc.delete(notSymlink, false);
}
}
}
/**
   * Tests that attempting to resolve a non-existent file results in
   * FileNotFoundException.
*/
@Test
public void testLinkTargetNonExistent() throws IOException {
Path doesNotExist = new Path("/filethatdoesnotexist");
DFSClient client = new DFSClient(cluster.getFileSystem().getUri(),
cluster.getConfiguration(0));
try {
client.getLinkTarget(doesNotExist.toString());
fail("Expected exception for resolving non-existent file");
} catch (FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: "
+ doesNotExist.toString(), e);
}
}
}
| 6,489 | 35.055556 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.loadGenerator;
import static org.junit.Assert.assertEquals;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Test;
/**
 * This class tests the structure generator and the load generator.
*/
public class TestLoadGenerator extends Configured implements Tool {
private static final Configuration CONF = new HdfsConfiguration();
private static final int DEFAULT_BLOCK_SIZE = 10;
private static final File OUT_DIR = PathUtils.getTestDir(TestLoadGenerator.class);
private static final File DIR_STRUCTURE_FILE =
new File(OUT_DIR, StructureGenerator.DIR_STRUCTURE_FILE_NAME);
private static final File FILE_STRUCTURE_FILE =
new File(OUT_DIR, StructureGenerator.FILE_STRUCTURE_FILE_NAME);
private static final String DIR_STRUCTURE_FIRST_LINE = "/dir0";
private static final String DIR_STRUCTURE_SECOND_LINE = "/dir1";
private static final String FILE_STRUCTURE_FIRST_LINE =
"/dir0/_file_0 0.3754598635933768";
private static final String FILE_STRUCTURE_SECOND_LINE =
"/dir1/_file_1 1.4729310851145203";
static {
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
}
/** Test if the structure generator works fine */
@Test
public void testStructureGenerator() throws Exception {
StructureGenerator sg = new StructureGenerator();
String[] args = new String[]{"-maxDepth", "2", "-minWidth", "1",
"-maxWidth", "2", "-numOfFiles", "2",
"-avgFileSize", "1", "-outDir", OUT_DIR.getAbsolutePath(), "-seed", "1"};
final int MAX_DEPTH = 1;
final int MIN_WIDTH = 3;
final int MAX_WIDTH = 5;
final int NUM_OF_FILES = 7;
final int AVG_FILE_SIZE = 9;
final int SEED = 13;
try {
// successful case
assertEquals(0, sg.run(args));
BufferedReader in = new BufferedReader(new FileReader(DIR_STRUCTURE_FILE));
assertEquals(DIR_STRUCTURE_FIRST_LINE, in.readLine());
assertEquals(DIR_STRUCTURE_SECOND_LINE, in.readLine());
assertEquals(null, in.readLine());
in.close();
in = new BufferedReader(new FileReader(FILE_STRUCTURE_FILE));
assertEquals(FILE_STRUCTURE_FIRST_LINE, in.readLine());
assertEquals(FILE_STRUCTURE_SECOND_LINE, in.readLine());
assertEquals(null, in.readLine());
in.close();
String oldArg = args[MAX_DEPTH];
args[MAX_DEPTH] = "0";
assertEquals(-1, sg.run(args));
args[MAX_DEPTH] = oldArg;
oldArg = args[MIN_WIDTH];
args[MIN_WIDTH] = "-1";
assertEquals(-1, sg.run(args));
args[MIN_WIDTH] = oldArg;
oldArg = args[MAX_WIDTH];
args[MAX_WIDTH] = "-1";
assertEquals(-1, sg.run(args));
args[MAX_WIDTH] = oldArg;
oldArg = args[NUM_OF_FILES];
args[NUM_OF_FILES] = "-1";
assertEquals(-1, sg.run(args));
args[NUM_OF_FILES] = oldArg;
oldArg = args[NUM_OF_FILES];
args[NUM_OF_FILES] = "-1";
assertEquals(-1, sg.run(args));
args[NUM_OF_FILES] = oldArg;
oldArg = args[AVG_FILE_SIZE];
args[AVG_FILE_SIZE] = "-1";
assertEquals(-1, sg.run(args));
args[AVG_FILE_SIZE] = oldArg;
oldArg = args[SEED];
args[SEED] = "34.d4";
assertEquals(-1, sg.run(args));
args[SEED] = oldArg;
} finally {
DIR_STRUCTURE_FILE.delete();
FILE_STRUCTURE_FILE.delete();
}
}
/** Test if the load generator works fine */
@Test
public void testLoadGenerator() throws Exception {
final String TEST_SPACE_ROOT = "/test";
final String SCRIPT_TEST_DIR = OUT_DIR.getAbsolutePath();
String script = SCRIPT_TEST_DIR + "/" + "loadgenscript";
String script2 = SCRIPT_TEST_DIR + "/" + "loadgenscript2";
File scriptFile1 = new File(script);
File scriptFile2 = new File(script2);
FileWriter writer = new FileWriter(DIR_STRUCTURE_FILE);
writer.write(DIR_STRUCTURE_FIRST_LINE+"\n");
writer.write(DIR_STRUCTURE_SECOND_LINE+"\n");
writer.close();
writer = new FileWriter(FILE_STRUCTURE_FILE);
writer.write(FILE_STRUCTURE_FIRST_LINE+"\n");
writer.write(FILE_STRUCTURE_SECOND_LINE+"\n");
writer.close();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(3).build();
cluster.waitActive();
try {
DataGenerator dg = new DataGenerator();
dg.setConf(CONF);
String [] args = new String[] {"-inDir", OUT_DIR.getAbsolutePath(), "-root", TEST_SPACE_ROOT};
assertEquals(0, dg.run(args));
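      // Indices into the LoadGenerator argument array, used below to
      // temporarily substitute invalid values.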
final int READ_PROBABILITY = 1;
final int WRITE_PROBABILITY = 3;
final int MAX_DELAY_BETWEEN_OPS = 7;
final int NUM_OF_THREADS = 9;
final int START_TIME = 11;
final int ELAPSED_TIME = 13;
LoadGenerator lg = new LoadGenerator();
lg.setConf(CONF);
args = new String[] {"-readProbability", "0.3", "-writeProbability", "0.3",
"-root", TEST_SPACE_ROOT, "-maxDelayBetweenOps", "0",
"-numOfThreads", "1", "-startTime",
Long.toString(Time.now()), "-elapsedTime", "10"};
assertEquals(0, lg.run(args));
String oldArg = args[READ_PROBABILITY];
args[READ_PROBABILITY] = "1.1";
assertEquals(-1, lg.run(args));
args[READ_PROBABILITY] = "-1.1";
assertEquals(-1, lg.run(args));
args[READ_PROBABILITY] = oldArg;
oldArg = args[WRITE_PROBABILITY];
args[WRITE_PROBABILITY] = "1.1";
assertEquals(-1, lg.run(args));
args[WRITE_PROBABILITY] = "-1.1";
assertEquals(-1, lg.run(args));
args[WRITE_PROBABILITY] = "0.9";
assertEquals(-1, lg.run(args));
args[READ_PROBABILITY] = oldArg;
oldArg = args[MAX_DELAY_BETWEEN_OPS];
args[MAX_DELAY_BETWEEN_OPS] = "1.x1";
assertEquals(-1, lg.run(args));
args[MAX_DELAY_BETWEEN_OPS] = oldArg;
oldArg = args[MAX_DELAY_BETWEEN_OPS];
args[MAX_DELAY_BETWEEN_OPS] = "1.x1";
assertEquals(-1, lg.run(args));
args[MAX_DELAY_BETWEEN_OPS] = oldArg;
oldArg = args[NUM_OF_THREADS];
args[NUM_OF_THREADS] = "-1";
assertEquals(-1, lg.run(args));
args[NUM_OF_THREADS] = oldArg;
oldArg = args[START_TIME];
args[START_TIME] = "-1";
assertEquals(-1, lg.run(args));
args[START_TIME] = oldArg;
oldArg = args[ELAPSED_TIME];
args[ELAPSED_TIME] = "-1";
assertEquals(-1, lg.run(args));
args[ELAPSED_TIME] = oldArg;
// test scripted operation
// Test with good script
FileWriter fw = new FileWriter(scriptFile1);
fw.write("2 .22 .33\n");
fw.write("3 .10 .6\n");
fw.write("6 0 .7\n");
fw.close();
String[] scriptArgs = new String[] {
"-root", TEST_SPACE_ROOT, "-maxDelayBetweenOps", "0",
"-numOfThreads", "10", "-startTime",
Long.toString(Time.now()), "-scriptFile", script};
assertEquals(0, lg.run(scriptArgs));
// Test with bad script
fw = new FileWriter(scriptFile2);
fw.write("2 .22 .33\n");
fw.write("3 blah blah blah .6\n");
fw.write("6 0 .7\n");
fw.close();
scriptArgs[scriptArgs.length - 1] = script2;
assertEquals(-1, lg.run(scriptArgs));
} finally {
cluster.shutdown();
DIR_STRUCTURE_FILE.delete();
FILE_STRUCTURE_FILE.delete();
scriptFile1.delete();
scriptFile2.delete();
}
}
/**
* @param args
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new TestLoadGenerator(), args);
System.exit(res);
}
@Override
public int run(String[] args) throws Exception {
TestLoadGenerator loadGeneratorTest = new TestLoadGenerator();
loadGeneratorTest.testStructureGenerator();
loadGeneratorTest.testLoadGenerator();
return 0;
}
}
| 9,254 | 33.533582 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.permission;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestStickyBit {
static final UserGroupInformation user1 =
UserGroupInformation.createUserForTesting("theDoctor", new String[] {"tardis"});
static final UserGroupInformation user2 =
UserGroupInformation.createUserForTesting("rose", new String[] {"powellestates"});
private static MiniDFSCluster cluster;
private static Configuration conf;
private static FileSystem hdfs;
private static FileSystem hdfsAsUser1;
private static FileSystem hdfsAsUser2;
@BeforeClass
public static void init() throws Exception {
conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
initCluster(true);
}
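  /**
   * Start the MiniDFSCluster and open FileSystem instances as the default
   * user and as the two test users.
   */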
private static void initCluster(boolean format) throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(format)
.build();
hdfs = cluster.getFileSystem();
assertTrue(hdfs instanceof DistributedFileSystem);
hdfsAsUser1 = DFSTestUtil.getFileSystemAs(user1, conf);
assertTrue(hdfsAsUser1 instanceof DistributedFileSystem);
hdfsAsUser2 = DFSTestUtil.getFileSystemAs(user2, conf);
assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
}
@Before
public void setup() throws Exception {
if (hdfs != null) {
for (FileStatus stat: hdfs.listStatus(new Path("/"))) {
hdfs.delete(stat.getPath(), true);
}
}
}
@AfterClass
public static void shutdown() throws Exception {
IOUtils.cleanup(null, hdfs, hdfsAsUser1, hdfsAsUser2);
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Ensure that even if a file is in a directory with the sticky bit on,
* another user can write to that file (assuming correct permissions).
*/
private void confirmCanAppend(Configuration conf, Path p) throws Exception {
// Write a file to the new tmp directory as a regular user
Path file = new Path(p, "foo");
writeFile(hdfsAsUser1, file);
hdfsAsUser1.setPermission(file, new FsPermission((short) 0777));
    // Log onto the cluster as another user and attempt to append to the file
Path file2 = new Path(p, "foo");
FSDataOutputStream h = null;
try {
h = hdfsAsUser2.append(file2);
h.write("Some more data".getBytes());
h.close();
h = null;
} finally {
IOUtils.cleanup(null, h);
}
}
/**
* Test that one user can't delete another user's file when the sticky bit is
* set.
*/
private void confirmDeletingFiles(Configuration conf, Path p)
throws Exception {
// Write a file to the new temp directory as a regular user
Path file = new Path(p, "foo");
writeFile(hdfsAsUser1, file);
// Make sure the correct user is the owner
assertEquals(user1.getShortUserName(),
hdfsAsUser1.getFileStatus(file).getOwner());
    // Log onto the cluster as another user and attempt to delete the file
try {
hdfsAsUser2.delete(file, false);
fail("Shouldn't be able to delete someone else's file with SB on");
} catch (IOException ioe) {
assertTrue(ioe instanceof AccessControlException);
assertTrue(ioe.getMessage().contains("sticky bit"));
}
}
/**
* Test that if a directory is created in a directory that has the sticky bit
* on, the new directory does not automatically get a sticky bit, as is
   * standard Unix behavior.
*/
private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path p)
throws IOException {
// Create a subdirectory within it
Path p2 = new Path(p, "bar");
hdfs.mkdirs(p2);
// Ensure new directory doesn't have its sticky bit on
assertFalse(hdfs.getFileStatus(p2).getPermission().getStickyBit());
}
/**
* Test basic ability to get and set sticky bits on files and directories.
*/
private void confirmSettingAndGetting(FileSystem hdfs, Path p, Path baseDir)
throws IOException {
// Initially sticky bit should not be set
assertFalse(hdfs.getFileStatus(p).getPermission().getStickyBit());
// Same permission, but with sticky bit on
short withSB;
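    // 01000 is the octal flag for the sticky bit; OR it into the existing bits.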
withSB = (short) (hdfs.getFileStatus(p).getPermission().toShort() | 01000);
assertTrue((new FsPermission(withSB)).getStickyBit());
hdfs.setPermission(p, new FsPermission(withSB));
assertTrue(hdfs.getFileStatus(p).getPermission().getStickyBit());
// Write a file to the fs, try to set its sticky bit
Path f = new Path(baseDir, "somefile");
writeFile(hdfs, f);
assertFalse(hdfs.getFileStatus(f).getPermission().getStickyBit());
withSB = (short) (hdfs.getFileStatus(f).getPermission().toShort() | 01000);
hdfs.setPermission(f, new FsPermission(withSB));
assertTrue(hdfs.getFileStatus(f).getPermission().getStickyBit());
}
@Test
public void testGeneralSBBehavior() throws Exception {
Path baseDir = new Path("/mcgann");
hdfs.mkdirs(baseDir);
// Create a tmp directory with wide-open permissions and sticky bit
Path p = new Path(baseDir, "tmp");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
confirmCanAppend(conf, p);
baseDir = new Path("/eccleston");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "roguetraders");
hdfs.mkdirs(p);
confirmSettingAndGetting(hdfs, p, baseDir);
baseDir = new Path("/tennant");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "contemporary");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
confirmDeletingFiles(conf, p);
baseDir = new Path("/smith");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "scissorsisters");
// Turn on its sticky bit
hdfs.mkdirs(p, new FsPermission((short) 01666));
    confirmStickyBitDoesntPropagate(hdfs, p);
}
@Test
public void testAclGeneralSBBehavior() throws Exception {
Path baseDir = new Path("/mcgann");
hdfs.mkdirs(baseDir);
// Create a tmp directory with wide-open permissions and sticky bit
Path p = new Path(baseDir, "tmp");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
applyAcl(p);
confirmCanAppend(conf, p);
baseDir = new Path("/eccleston");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "roguetraders");
hdfs.mkdirs(p);
applyAcl(p);
confirmSettingAndGetting(hdfs, p, baseDir);
baseDir = new Path("/tennant");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "contemporary");
hdfs.mkdirs(p);
hdfs.setPermission(p, new FsPermission((short) 01777));
applyAcl(p);
confirmDeletingFiles(conf, p);
baseDir = new Path("/smith");
hdfs.mkdirs(baseDir);
p = new Path(baseDir, "scissorsisters");
// Turn on its sticky bit
hdfs.mkdirs(p, new FsPermission((short) 01666));
applyAcl(p);
confirmStickyBitDoesntPropagate(hdfs, p);
}
/**
* Test that one user can't rename/move another user's file when the sticky
* bit is set.
*/
@Test
public void testMovingFiles() throws Exception {
testMovingFiles(false);
}
@Test
public void testAclMovingFiles() throws Exception {
testMovingFiles(true);
}
private void testMovingFiles(boolean useAcl) throws Exception {
    // Create two tmp directories with wide-open permissions and the sticky bit
Path tmpPath = new Path("/tmp");
Path tmpPath2 = new Path("/tmp2");
hdfs.mkdirs(tmpPath);
hdfs.mkdirs(tmpPath2);
hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
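    // Optionally layer an ACL on top so the sticky bit check is also exercised
    // on ACL-enabled directories.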
if (useAcl) {
applyAcl(tmpPath);
}
hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));
if (useAcl) {
applyAcl(tmpPath2);
}
// Write a file to the new tmp directory as a regular user
Path file = new Path(tmpPath, "foo");
writeFile(hdfsAsUser1, file);
    // Log onto the cluster as another user and attempt to move the file
try {
hdfsAsUser2.rename(file, new Path(tmpPath2, "renamed"));
fail("Shouldn't be able to rename someone else's file with SB on");
} catch (IOException ioe) {
assertTrue(ioe instanceof AccessControlException);
assertTrue(ioe.getMessage().contains("sticky bit"));
}
}
/**
* Ensure that when we set a sticky bit and shut down the file system, we get
* the sticky bit back on re-start, and that no extra sticky bits appear after
* re-start.
*/
@Test
public void testStickyBitPersistence() throws Exception {
// A tale of three directories...
Path sbSet = new Path("/Housemartins");
Path sbNotSpecified = new Path("/INXS");
Path sbSetOff = new Path("/Easyworld");
for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
hdfs.mkdirs(p);
    // Two directories have their sticky bits set explicitly...
hdfs.setPermission(sbSet, new FsPermission((short) 01777));
hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
shutdown();
// Start file system up again
initCluster(false);
assertTrue(hdfs.exists(sbSet));
assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
assertTrue(hdfs.exists(sbNotSpecified));
assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
.getStickyBit());
assertTrue(hdfs.exists(sbSetOff));
assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
}
@Test
public void testAclStickyBitPersistence() throws Exception {
// A tale of three directories...
Path sbSet = new Path("/Housemartins");
Path sbNotSpecified = new Path("/INXS");
Path sbSetOff = new Path("/Easyworld");
for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
hdfs.mkdirs(p);
    // Two directories have their sticky bits set explicitly...
hdfs.setPermission(sbSet, new FsPermission((short) 01777));
applyAcl(sbSet);
hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
applyAcl(sbSetOff);
shutdown();
// Start file system up again
initCluster(false);
assertTrue(hdfs.exists(sbSet));
assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
assertTrue(hdfs.exists(sbNotSpecified));
assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
.getStickyBit());
assertTrue(hdfs.exists(sbSetOff));
assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
}
  /**
   * Write a quick file to the specified file system at the specified path.
   */
  private static void writeFile(FileSystem hdfs, Path p) throws IOException {
FSDataOutputStream o = null;
try {
o = hdfs.create(p);
o.write("some file contents".getBytes());
o.close();
o = null;
} finally {
IOUtils.cleanup(null, o);
}
}
/**
* Applies an ACL (both access and default) to the given path.
*
* @param p Path to set
* @throws IOException if an ACL could not be modified
*/
private static void applyAcl(Path p) throws IOException {
hdfs.modifyAclEntries(p, Arrays.asList(
aclEntry(ACCESS, USER, user2.getShortUserName(), ALL),
aclEntry(DEFAULT, USER, user2.getShortUserName(), ALL)));
}
}
| 13,158 | 31.652605 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.List;
import java.io.IOException;
import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.fs.permission.FsAction.NONE;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
/**
 * Verify ACL functionality through ViewFs.
 */
public class TestViewFsWithAcls {
private static MiniDFSCluster cluster;
private static Configuration clusterConf = new Configuration();
private static FileContext fc, fc2;
private FileContext fcView, fcTarget, fcTarget2;
private Configuration fsViewConf;
private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2;
private FileContextTestHelper fileContextTestHelper =
new FileContextTestHelper("/tmp/TestViewFsWithAcls");
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(clusterConf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
.numDataNodes(2)
.build();
cluster.waitClusterUp();
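    // One FileContext per federated namespace: fc talks to the first NameNode,
    // fc2 to the second.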
fc = FileContext.getFileContext(cluster.getURI(0), clusterConf);
fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
}
@AfterClass
  public static void clusterShutdownAtEnd() throws Exception {
cluster.shutdown();
}
@Before
public void setUp() throws Exception {
fcTarget = fc;
fcTarget2 = fc2;
targetTestRoot = fileContextTestHelper.getAbsoluteTestRootPath(fc);
targetTestRoot2 = fileContextTestHelper.getAbsoluteTestRootPath(fc2);
fcTarget.delete(targetTestRoot, true);
fcTarget2.delete(targetTestRoot2, true);
fcTarget.mkdir(targetTestRoot, new FsPermission((short)0750), true);
fcTarget2.mkdir(targetTestRoot2, new FsPermission((short)0750), true);
fsViewConf = ViewFileSystemTestSetup.createConfig();
setupMountPoints();
fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, fsViewConf);
}
private void setupMountPoints() {
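    // Each ViewFs mount point links to a test root in a different federated
    // namespace, so ACL operations can be verified against both NameNodes.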
mountOnNn1 = new Path("/mountOnNn1");
mountOnNn2 = new Path("/mountOnNn2");
ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri());
ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
}
@After
public void tearDown() throws Exception {
fcTarget.delete(fileContextTestHelper.getTestRootPath(fcTarget), true);
fcTarget2.delete(fileContextTestHelper.getTestRootPath(fcTarget2), true);
}
  /**
   * Verify that a ViewFs wrapped over multiple federated NameNodes dispatches
   * ACL operations to the correct NameNode.
   */
@Test
public void testAclOnMountEntry() throws Exception {
// Set ACLs on the first namespace and verify they are correct
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE));
fcView.setAcl(mountOnNn1, aclSpec);
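    // Note: the returned entries omit the unnamed owner, other, and mask access
    // entries, which are reflected in the file's permission bits.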
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) };
assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn1)));
    // Double-check by getting the ACL status directly from the target
    // FileContext instead of through ViewFs
assertArrayEquals(expected, aclEntryArray(fc.getAclStatus(targetTestRoot)));
// Modify the ACL entries on the first namespace
aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ));
fcView.modifyAclEntries(mountOnNn1, aclSpec);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(DEFAULT, USER, READ_WRITE),
aclEntry(DEFAULT, USER, "foo", READ),
aclEntry(DEFAULT, GROUP, READ),
aclEntry(DEFAULT, MASK, READ),
aclEntry(DEFAULT, OTHER, NONE) };
assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn1)));
fcView.removeDefaultAcl(mountOnNn1);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) };
assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn1)));
assertArrayEquals(expected, aclEntryArray(fc.getAclStatus(targetTestRoot)));
// Paranoid check: verify the other namespace does not
// have ACLs set on the same path.
assertEquals(0, fcView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0, fc2.getAclStatus(targetTestRoot2).getEntries().size());
// Remove the ACL entries on the first namespace
fcView.removeAcl(mountOnNn1);
assertEquals(0, fcView.getAclStatus(mountOnNn1).getEntries().size());
assertEquals(0, fc.getAclStatus(targetTestRoot).getEntries().size());
// Now set ACLs on the second namespace
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bar", READ));
fcView.modifyAclEntries(mountOnNn2, aclSpec);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bar", READ),
aclEntry(ACCESS, GROUP, READ_EXECUTE) };
assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn2)));
assertArrayEquals(expected, aclEntryArray(fc2.getAclStatus(targetTestRoot2)));
// Remove the ACL entries on the second namespace
fcView.removeAclEntries(mountOnNn2, Lists.newArrayList(
aclEntry(ACCESS, USER, "bar", READ)
));
expected = new AclEntry[] { aclEntry(ACCESS, GROUP, READ_EXECUTE) };
assertArrayEquals(expected, aclEntryArray(fc2.getAclStatus(targetTestRoot2)));
fcView.removeAcl(mountOnNn2);
assertEquals(0, fcView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0, fc2.getAclStatus(targetTestRoot2).getEntries().size());
}
private AclEntry[] aclEntryArray(AclStatus aclStatus) {
return aclStatus.getEntries().toArray(new AclEntry[0]);
}
}
| 7,725 | 39.450262 | 83 |
java
|