repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string)
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
/**
* This class tests disabling client connection caching in a single node
* mini-cluster.
*/
public class TestDisableConnCache {
static final Log LOG = LogFactory.getLog(TestDisableConnCache.class);
static final int BLOCK_SIZE = 4096;
static final int FILE_SIZE = 3 * BLOCK_SIZE;
/**
* Test that the socket cache can be disabled by setting the capacity to
* 0. Regression test for HDFS-3365.
* @throws Exception
*/
@Test
public void testDisableCache() throws Exception {
HdfsConfiguration confWithoutCache = new HdfsConfiguration();
// Configure a new instance with no peer caching, ensure that it doesn't
// cache anything
confWithoutCache.setInt(
DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
BlockReaderTestUtil util = new BlockReaderTestUtil(1, confWithoutCache);
final Path testFile = new Path("/testConnCache.dat");
util.writeFile(testFile, FILE_SIZE / 1024);
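// FileSystem.newInstance() bypasses the FileSystem cache, so this client is
// built from the zero-capacity peer cache configuration above.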
FileSystem fsWithoutCache = FileSystem.newInstance(util.getConf());
try {
DFSTestUtil.readFile(fsWithoutCache, testFile);
assertEquals(0, ((DistributedFileSystem)fsWithoutCache).
dfs.getClientContext().getPeerCache().size());
} finally {
fsWithoutCache.close();
util.shutdown();
}
}
}
| 2,351 | 36.333333 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeTrue;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.UnknownHostException;
import java.security.PrivilegedExceptionAction;
import java.util.EnumSet;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
/**
* This class tests various cases during file creation.
*/
public class TestFileCreation {
static final String DIR = "/" + TestFileCreation.class.getSimpleName() + "/";
{
//((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
}
private static final String RPC_DETAILED_METRICS =
"RpcDetailedActivityForPort";
static final long seed = 0xDEADBEEFL;
static final int blockSize = 8192;
static final int numBlocks = 2;
static final int fileSize = numBlocks * blockSize + 1;
boolean simulatedStorage = false;
private static final String[] NON_CANONICAL_PATHS = new String[] {
"//foo",
"///foo2",
"//dir//file",
"////test2/file",
"/dir/./file2",
"/dir/../file3"
};
// creates a file but does not close it
public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
throws IOException {
System.out.println("createFile: Created " + name + " with " + repl + " replica.");
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) repl, blockSize);
return stm;
}
public static HdfsDataOutputStream create(DistributedFileSystem dfs,
Path name, int repl) throws IOException {
return (HdfsDataOutputStream)createFile(dfs, name, repl);
}
//
// writes to file but does not close it
//
static void writeFile(FSDataOutputStream stm) throws IOException {
writeFile(stm, fileSize);
}
//
// writes specified bytes to file.
//
public static void writeFile(FSDataOutputStream stm, int size) throws IOException {
byte[] buffer = AppendTestUtil.randomBytes(seed, size);
stm.write(buffer, 0, size);
}
/**
* Test that server default values can be retrieved on the client side
*/
@Test
public void testServerDefaults() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
conf.setInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT);
conf.setInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
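// Bump replication above the default so the assertion below can distinguish
// the server-provided value from the client-side default.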
conf.setInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT + 1);
conf.setInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(DFSConfigKeys.DFS_REPLICATION_DEFAULT + 1)
.build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
try {
FsServerDefaults serverDefaults = fs.getServerDefaults();
assertEquals(DFS_BLOCK_SIZE_DEFAULT, serverDefaults.getBlockSize());
assertEquals(DFS_BYTES_PER_CHECKSUM_DEFAULT, serverDefaults.getBytesPerChecksum());
assertEquals(DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT, serverDefaults.getWritePacketSize());
assertEquals(DFS_REPLICATION_DEFAULT + 1, serverDefaults.getReplication());
assertEquals(IO_FILE_BUFFER_SIZE_DEFAULT, serverDefaults.getFileBufferSize());
} finally {
fs.close();
cluster.shutdown();
}
}
@Test
public void testFileCreation() throws IOException {
checkFileCreation(null, false);
}
/** Same test but the client should use DN hostnames */
@Test
public void testFileCreationUsingHostname() throws IOException {
assumeTrue(System.getProperty("os.name").startsWith("Linux"));
checkFileCreation(null, true);
}
/** Same test but the client should bind to a local interface */
@Test
public void testFileCreationSetLocalInterface() throws IOException {
assumeTrue(System.getProperty("os.name").startsWith("Linux"));
// The mini cluster listens on the loopback so we can use it here
checkFileCreation("lo", false);
try {
checkFileCreation("bogus-interface", false);
fail("Able to specify a bogus interface");
} catch (UnknownHostException e) {
assertEquals("No such interface bogus-interface", e.getMessage());
}
}
/**
* Test that file creation and disk space consumption work correctly
* @param netIf the local interface, if any, clients should use to access DNs
* @param useDnHostname whether the client should contact DNs by hostname
*/
public void checkFileCreation(String netIf, boolean useDnHostname)
throws IOException {
Configuration conf = new HdfsConfiguration();
if (netIf != null) {
conf.set(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
}
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, useDnHostname);
if (useDnHostname) {
// Since the mini cluster only listens on the loopback we have to
// ensure the hostname used to access DNs maps to the loopback. We
// do this by telling the DN to advertise localhost as its hostname
// instead of the default hostname.
conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
}
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.checkDataNodeHostConfig(true)
.build();
FileSystem fs = cluster.getFileSystem();
try {
//
// check that / exists
//
Path path = new Path("/");
System.out.println("Path : \"" + path.toString() + "\"");
System.out.println(fs.getFileStatus(path).isDirectory());
assertTrue("/ should be a directory",
fs.getFileStatus(path).isDirectory());
//
// Create a directory inside /, then try to overwrite it
//
Path dir1 = new Path("/test_dir");
fs.mkdirs(dir1);
System.out.println("createFile: Creating " + dir1.getName() +
" for overwrite of existing directory.");
try {
fs.create(dir1, true); // Create path, overwrite=true
fs.close();
assertTrue("Did not prevent directory from being overwritten.", false);
} catch (FileAlreadyExistsException e) {
// expected
}
//
// create a new file in home directory. Do not close it.
//
Path file1 = new Path("filestatus.dat");
Path parent = file1.getParent();
fs.mkdirs(parent);
DistributedFileSystem dfs = (DistributedFileSystem)fs;
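// Set a namespace quota of 100 and a disk-space quota of 5 * blockSize bytes
// on the parent directory before creating the file.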
dfs.setQuota(file1.getParent(), 100L, blockSize*5);
FSDataOutputStream stm = createFile(fs, file1, 1);
// verify that file exists in FS namespace
assertTrue(file1 + " should be a file",
fs.getFileStatus(file1).isFile());
System.out.println("Path : \"" + file1 + "\"");
// write to file
writeFile(stm);
stm.close();
// verify that file size has changed to the full size
long len = fs.getFileStatus(file1).getLen();
assertTrue(file1 + " should be of size " + fileSize +
" but found to be of size " + len,
len == fileSize);
// verify the disk space the file occupied
long diskSpace = dfs.getContentSummary(file1.getParent()).getLength();
assertEquals(file1 + " should take " + fileSize + " bytes disk space " +
"but found to take " + diskSpace + " bytes", fileSize, diskSpace);
// Check storage usage
// can't check capacities for real storage since the OS file system may be changing under us.
if (simulatedStorage) {
DataNode dn = cluster.getDataNodes().get(0);
FsDatasetSpi<?> dataset = DataNodeTestUtils.getFSDataset(dn);
assertEquals(fileSize, dataset.getDfsUsed());
assertEquals(SimulatedFSDataset.DEFAULT_CAPACITY-fileSize,
dataset.getRemaining());
}
} finally {
cluster.shutdown();
}
}
/**
* Test deleteOnExit
*/
@Test
public void testDeleteOnExit() throws IOException {
Configuration conf = new HdfsConfiguration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
FileSystem localfs = FileSystem.getLocal(conf);
try {
// Creates files in HDFS and local file system.
//
Path file1 = new Path("filestatus.dat");
Path file2 = new Path("filestatus2.dat");
Path file3 = new Path("filestatus3.dat");
FSDataOutputStream stm1 = createFile(fs, file1, 1);
FSDataOutputStream stm2 = createFile(fs, file2, 1);
FSDataOutputStream stm3 = createFile(localfs, file3, 1);
System.out.println("DeleteOnExit: Created files.");
// write to files and close. Purposely, do not close file2.
writeFile(stm1);
writeFile(stm3);
stm1.close();
stm2.close();
stm3.close();
// set delete on exit flag on files.
fs.deleteOnExit(file1);
fs.deleteOnExit(file2);
localfs.deleteOnExit(file3);
// close the file system. This should make the above files
// disappear.
fs.close();
localfs.close();
fs = null;
localfs = null;
// reopen file system and verify that file does not exist.
fs = cluster.getFileSystem();
localfs = FileSystem.getLocal(conf);
assertTrue(file1 + " still exists inspite of deletOnExit set.",
!fs.exists(file1));
assertTrue(file2 + " still exists inspite of deletOnExit set.",
!fs.exists(file2));
assertTrue(file3 + " still exists inspite of deletOnExit set.",
!localfs.exists(file3));
System.out.println("DeleteOnExit successful.");
} finally {
IOUtils.closeStream(fs);
IOUtils.closeStream(localfs);
cluster.shutdown();
}
}
/**
* Test that a file which is open for write is overwritten by another
* client. Regression test for HDFS-3755.
*/
@Test
public void testOverwriteOpenForWrite() throws Exception {
Configuration conf = new HdfsConfiguration();
SimulatedFSDataset.setFactory(conf);
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
UserGroupInformation otherUgi = UserGroupInformation.createUserForTesting(
"testuser", new String[]{"testgroup"});
FileSystem fs2 = otherUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(cluster.getConfiguration(0));
}
});
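// The RPC detailed metrics record is registered per NameNode port.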
String metricsName = RPC_DETAILED_METRICS + cluster.getNameNodePort();
try {
Path p = new Path("/testfile");
FSDataOutputStream stm1 = fs.create(p);
stm1.write(1);
assertCounter("CreateNumOps", 1L, getMetrics(metricsName));
// Create file again without overwrite
try {
fs2.create(p, false);
fail("Did not throw!");
} catch (IOException abce) {
GenericTestUtils.assertExceptionContains("Failed to CREATE_FILE", abce);
}
assertCounter("AlreadyBeingCreatedExceptionNumOps",
1L, getMetrics(metricsName));
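// Now overwrite the file while it is still open for write by the first client.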
FSDataOutputStream stm2 = fs2.create(p, true);
stm2.write(2);
stm2.close();
try {
stm1.close();
fail("Should have exception closing stm1 since it was deleted");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("No lease on /testfile", ioe);
GenericTestUtils.assertExceptionContains("File does not exist.", ioe);
}
} finally {
IOUtils.closeStream(fs);
IOUtils.closeStream(fs2);
cluster.shutdown();
}
}
/**
* Test that file data does not become corrupted even in the face of errors.
*/
@Test
public void testFileCreationError1() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
// create cluster
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
cluster.waitActive();
InetSocketAddress addr = new InetSocketAddress("localhost",
cluster.getNameNodePort());
DFSClient client = new DFSClient(addr, conf);
try {
// create a new file.
//
Path file1 = new Path("/filestatus.dat");
FSDataOutputStream stm = createFile(fs, file1, 1);
// verify that file exists in FS namespace
assertTrue(file1 + " should be a file",
fs.getFileStatus(file1).isFile());
System.out.println("Path : \"" + file1 + "\"");
// kill the datanode
cluster.shutdownDataNodes();
// wait for the datanode to be declared dead
while (true) {
DatanodeInfo[] info = client.datanodeReport(
HdfsConstants.DatanodeReportType.LIVE);
if (info.length == 0) {
break;
}
System.out.println("testFileCreationError1: waiting for datanode " +
" to die.");
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
}
}
// write 1 byte to file.
// This should fail because all datanodes are dead.
byte[] buffer = AppendTestUtil.randomBytes(seed, 1);
try {
stm.write(buffer);
stm.close();
} catch (Exception e) {
System.out.println("Encountered expected exception");
}
// verify that no blocks are associated with this file
// bad block allocations were cleaned up earlier.
LocatedBlocks locations = client.getNamenode().getBlockLocations(
file1.toString(), 0, Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
assertTrue("Error blocks were not cleaned up",
locations.locatedBlockCount() == 0);
} finally {
cluster.shutdown();
client.close();
}
}
/**
* Test that the filesystem removes the last block from a file if its
* lease expires.
*/
@Test
public void testFileCreationError2() throws IOException {
long leasePeriod = 1000;
System.out.println("testFileCreationError2 start");
Configuration conf = new HdfsConfiguration();
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
// create cluster
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
DistributedFileSystem dfs = null;
try {
cluster.waitActive();
dfs = cluster.getFileSystem();
DFSClient client = dfs.dfs;
// create a new file.
//
Path file1 = new Path("/filestatus.dat");
createFile(dfs, file1, 1);
System.out.println("testFileCreationError2: "
+ "Created file filestatus.dat with one replicas.");
LocatedBlocks locations = client.getNamenode().getBlockLocations(
file1.toString(), 0, Long.MAX_VALUE);
System.out.println("testFileCreationError2: "
+ "The file has " + locations.locatedBlockCount() + " blocks.");
// add one block to the file
LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
client.clientName, null, null, HdfsConstants.GRANDFATHER_INODE_ID, null);
System.out.println("testFileCreationError2: "
+ "Added block " + location.getBlock());
locations = client.getNamenode().getBlockLocations(file1.toString(),
0, Long.MAX_VALUE);
int count = locations.locatedBlockCount();
System.out.println("testFileCreationError2: "
+ "The file now has " + count + " blocks.");
// set the soft and hard limit to be 1 second so that the
// namenode triggers lease recovery
cluster.setLeasePeriod(leasePeriod, leasePeriod);
// wait for the lease to expire
try {
Thread.sleep(5 * leasePeriod);
} catch (InterruptedException e) {
}
// verify that the last block was synchronized.
locations = client.getNamenode().getBlockLocations(file1.toString(),
0, Long.MAX_VALUE);
System.out.println("testFileCreationError2: "
+ "locations = " + locations.locatedBlockCount());
assertEquals(0, locations.locatedBlockCount());
System.out.println("testFileCreationError2 successful");
} finally {
IOUtils.closeStream(dfs);
cluster.shutdown();
}
}
/** test addBlock(..) when replication<min and excludeNodes==null. */
@Test
public void testFileCreationError3() throws IOException {
System.out.println("testFileCreationError3 start");
Configuration conf = new HdfsConfiguration();
// create cluster
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
DistributedFileSystem dfs = null;
try {
cluster.waitActive();
dfs = cluster.getFileSystem();
DFSClient client = dfs.dfs;
// create a new file.
final Path f = new Path("/foo.txt");
createFile(dfs, f, 3);
try {
cluster.getNameNodeRpc().addBlock(f.toString(), client.clientName,
null, null, HdfsConstants.GRANDFATHER_INODE_ID, null);
fail();
} catch(IOException ioe) {
FileSystem.LOG.info("GOOD!", ioe);
}
System.out.println("testFileCreationError3 successful");
} finally {
IOUtils.closeStream(dfs);
cluster.shutdown();
}
}
/**
* Test that file leases are persisted across namenode restarts.
*/
@Test
public void testFileCreationNamenodeRestart()
throws IOException, NoSuchFieldException, IllegalAccessException {
Configuration conf = new HdfsConfiguration();
final int MAX_IDLE_TIME = 2000; // 2s
conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
// create cluster
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
DistributedFileSystem fs = null;
try {
cluster.waitActive();
fs = cluster.getFileSystem();
final int nnport = cluster.getNameNodePort();
// create a new file.
Path file1 = new Path("/filestatus.dat");
HdfsDataOutputStream stm = create(fs, file1, 1);
System.out.println("testFileCreationNamenodeRestart: "
+ "Created file " + file1);
assertEquals(file1 + " should be replicated to 1 datanode.", 1,
stm.getCurrentBlockReplication());
// write two full blocks.
writeFile(stm, numBlocks * blockSize);
stm.hflush();
assertEquals(file1 + " should still be replicated to 1 datanode.", 1,
stm.getCurrentBlockReplication());
// rename the file while keeping it open.
Path fileRenamed = new Path("/filestatusRenamed.dat");
fs.rename(file1, fileRenamed);
System.out.println("testFileCreationNamenodeRestart: "
+ "Renamed file " + file1 + " to " +
fileRenamed);
file1 = fileRenamed;
// create another new file.
//
Path file2 = new Path("/filestatus2.dat");
FSDataOutputStream stm2 = createFile(fs, file2, 1);
System.out.println("testFileCreationNamenodeRestart: "
+ "Created file " + file2);
// create yet another new file with full path name.
// rename it while open
//
Path file3 = new Path("/user/home/fullpath.dat");
FSDataOutputStream stm3 = createFile(fs, file3, 1);
System.out.println("testFileCreationNamenodeRestart: "
+ "Created file " + file3);
Path file4 = new Path("/user/home/fullpath4.dat");
FSDataOutputStream stm4 = createFile(fs, file4, 1);
System.out.println("testFileCreationNamenodeRestart: "
+ "Created file " + file4);
fs.mkdirs(new Path("/bin"));
fs.rename(new Path("/user/home"), new Path("/bin"));
Path file3new = new Path("/bin/home/fullpath.dat");
System.out.println("testFileCreationNamenodeRestart: "
+ "Renamed file " + file3 + " to " +
file3new);
Path file4new = new Path("/bin/home/fullpath4.dat");
System.out.println("testFileCreationNamenodeRestart: "
+ "Renamed file " + file4 + " to " +
file4new);
// restart cluster with the same namenode port as before.
// This ensures that leases are persisted in fsimage.
cluster.shutdown(false, false);
try {
Thread.sleep(2*MAX_IDLE_TIME);
} catch (InterruptedException e) {
}
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
.format(false)
.build();
cluster.waitActive();
// restart cluster yet again. This triggers the code to read in
// persistent leases from fsimage.
cluster.shutdown(false, false);
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
}
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
.format(false)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
// instruct the dfsclient to use a new filename when it requests
// new blocks for files that were renamed.
DFSOutputStream dfstream = (DFSOutputStream)
(stm.getWrappedStream());
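// Clear the 'final' modifier on DFSOutputStream#src via reflection so each
// stream's target path can be updated to the renamed file below.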
Field f = DFSOutputStream.class.getDeclaredField("src");
Field modifiersField = Field.class.getDeclaredField("modifiers");
modifiersField.setAccessible(true);
modifiersField.setInt(f, f.getModifiers() & ~Modifier.FINAL);
f.setAccessible(true);
f.set(dfstream, file1.toString());
dfstream = (DFSOutputStream) (stm3.getWrappedStream());
f.set(dfstream, file3new.toString());
dfstream = (DFSOutputStream) (stm4.getWrappedStream());
f.set(dfstream, file4new.toString());
// write 1 byte to file. This should succeed because the
// namenode should have persisted leases.
byte[] buffer = AppendTestUtil.randomBytes(seed, 1);
stm.write(buffer);
stm.close();
stm2.write(buffer);
stm2.close();
stm3.close();
stm4.close();
// verify that new block is associated with this file
DFSClient client = fs.dfs;
LocatedBlocks locations = client.getNamenode().getBlockLocations(
file1.toString(), 0, Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
assertTrue("Error blocks were not cleaned up for file " + file1,
locations.locatedBlockCount() == 3);
// verify filestatus2.dat
locations = client.getNamenode().getBlockLocations(
file2.toString(), 0, Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
assertTrue("Error blocks were not cleaned up for file " + file2,
locations.locatedBlockCount() == 1);
} finally {
IOUtils.closeStream(fs);
cluster.shutdown();
}
}
/**
* Test that all open files are closed when client dies abnormally.
*/
@Test
public void testDFSClientDeath() throws IOException, InterruptedException {
Configuration conf = new HdfsConfiguration();
System.out.println("Testing adbornal client death.");
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
DistributedFileSystem dfs = (DistributedFileSystem) fs;
DFSClient dfsclient = dfs.dfs;
try {
// create a new file in home directory. Do not close it.
//
Path file1 = new Path("/clienttest.dat");
FSDataOutputStream stm = createFile(fs, file1, 1);
System.out.println("Created file clienttest.dat");
// write to file
writeFile(stm);
// close the dfsclient before closing the output stream.
// This should close all existing file.
dfsclient.close();
// reopen file system and verify that file exists.
assertTrue(file1 + " does not exist.",
AppendTestUtil.createHdfsWithDifferentUsername(conf).exists(file1));
} finally {
cluster.shutdown();
}
}
/**
* Test file creation using createNonRecursive().
*/
@Test
public void testFileCreationNonRecursive() throws IOException {
Configuration conf = new HdfsConfiguration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
final Path path = new Path("/" + Time.now()
+ "-testFileCreationNonRecursive");
FSDataOutputStream out = null;
try {
IOException expectedException = null;
final String nonExistDir = "/non-exist-" + Time.now();
fs.delete(new Path(nonExistDir), true);
EnumSet<CreateFlag> createFlag = EnumSet.of(CreateFlag.CREATE);
// Create a new file in root dir, should succeed
out = createNonRecursive(fs, path, 1, createFlag);
out.close();
// Create a file when parent dir exists as file, should fail
try {
createNonRecursive(fs, new Path(path, "Create"), 1, createFlag);
} catch (IOException e) {
expectedException = e;
}
assertTrue("Create a file when parent directory exists as a file"
+ " should throw ParentNotDirectoryException ",
expectedException != null
&& expectedException instanceof ParentNotDirectoryException);
fs.delete(path, true);
// Create a file in a non-exist directory, should fail
final Path path2 = new Path(nonExistDir + "/testCreateNonRecursive");
expectedException = null;
try {
createNonRecursive(fs, path2, 1, createFlag);
} catch (IOException e) {
expectedException = e;
}
assertTrue("Create a file in a non-exist dir using"
+ " createNonRecursive() should throw FileNotFoundException ",
expectedException != null
&& expectedException instanceof FileNotFoundException);
EnumSet<CreateFlag> overwriteFlag =
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
// Overwrite a file in root dir, should succeed
out = createNonRecursive(fs, path, 1, overwriteFlag);
out.close();
// Overwrite a file when parent dir exists as file, should fail
expectedException = null;
try {
createNonRecursive(fs, new Path(path, "Overwrite"), 1, overwriteFlag);
} catch (IOException e) {
expectedException = e;
}
assertTrue("Overwrite a file when parent directory exists as a file"
+ " should throw ParentNotDirectoryException ",
expectedException != null
&& expectedException instanceof ParentNotDirectoryException);
fs.delete(path, true);
// Overwrite a file in a non-exist directory, should fail
final Path path3 = new Path(nonExistDir + "/testOverwriteNonRecursive");
expectedException = null;
try {
createNonRecursive(fs, path3, 1, overwriteFlag);
} catch (IOException e) {
expectedException = e;
}
assertTrue("Overwrite a file in a non-exist dir using"
+ " createNonRecursive() should throw FileNotFoundException ",
expectedException != null
&& expectedException instanceof FileNotFoundException);
} finally {
fs.close();
cluster.shutdown();
}
}
// creates a file using DistributedFileSystem.createNonRecursive()
static FSDataOutputStream createNonRecursive(FileSystem fs, Path name,
int repl, EnumSet<CreateFlag> flag) throws IOException {
System.out.println("createNonRecursive: Created " + name + " with " + repl
+ " replica.");
FSDataOutputStream stm = ((DistributedFileSystem) fs).createNonRecursive(
name, FsPermission.getDefault(), flag, fs.getConf().getInt(
CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) repl, blockSize, null);
return stm;
}
/**
* Test that file data becomes available before file is closed.
*/
@Test
public void testFileCreationSimulated() throws IOException {
simulatedStorage = true;
testFileCreation();
simulatedStorage = false;
}
/**
* Test creating two files at the same time.
*/
@Test
public void testConcurrentFileCreation() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
FileSystem fs = cluster.getFileSystem();
Path[] p = {new Path("/foo"), new Path("/bar")};
//write 2 files at the same time
FSDataOutputStream[] out = {fs.create(p[0]), fs.create(p[1])};
int i = 0;
for(; i < 100; i++) {
out[0].write(i);
out[1].write(i);
}
out[0].close();
for(; i < 200; i++) {out[1].write(i);}
out[1].close();
//verify
FSDataInputStream[] in = {fs.open(p[0]), fs.open(p[1])};
for(i = 0; i < 100; i++) {assertEquals(i, in[0].read());}
for(i = 0; i < 200; i++) {assertEquals(i, in[1].read());}
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
/**
* Test creating a file whose data gets synced when the file is closed
*/
@Test
public void testFileCreationSyncOnClose() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFS_DATANODE_SYNCONCLOSE_KEY, true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
FileSystem fs = cluster.getFileSystem();
Path[] p = {new Path("/foo"), new Path("/bar")};
//write 2 files at the same time
FSDataOutputStream[] out = {fs.create(p[0]), fs.create(p[1])};
int i = 0;
for(; i < 100; i++) {
out[0].write(i);
out[1].write(i);
}
out[0].close();
for(; i < 200; i++) {out[1].write(i);}
out[1].close();
//verify
FSDataInputStream[] in = {fs.open(p[0]), fs.open(p[1])};
for(i = 0; i < 100; i++) {assertEquals(i, in[0].read());}
for(i = 0; i < 200; i++) {assertEquals(i, in[1].read());}
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
/**
* Create a file, write something, hflush but not close.
* Then change lease period and wait for lease recovery.
* Finally, read the block directly from each Datanode and verify the content.
*/
@Test
public void testLeaseExpireHardLimit() throws Exception {
System.out.println("testLeaseExpireHardLimit start");
final long leasePeriod = 1000;
final int DATANODE_NUM = 3;
Configuration conf = new HdfsConfiguration();
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
// create cluster
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
DistributedFileSystem dfs = null;
try {
cluster.waitActive();
dfs = cluster.getFileSystem();
// create a new file.
final String f = DIR + "foo";
final Path fpath = new Path(f);
HdfsDataOutputStream out = create(dfs, fpath, DATANODE_NUM);
out.write("something".getBytes());
out.hflush();
int actualRepl = out.getCurrentBlockReplication();
assertTrue(f + " should be replicated to " + DATANODE_NUM + " datanodes.",
actualRepl == DATANODE_NUM);
// set the soft and hard limit to be 1 second so that the
// namenode triggers lease recovery
cluster.setLeasePeriod(leasePeriod, leasePeriod);
// wait for the lease to expire
try {Thread.sleep(5 * leasePeriod);} catch (InterruptedException e) {}
LocatedBlocks locations = dfs.dfs.getNamenode().getBlockLocations(
f, 0, Long.MAX_VALUE);
assertEquals(1, locations.locatedBlockCount());
LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
int successcount = 0;
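// Read each replica's block file directly from the datanode's storage and
// verify that it holds the data hflushed before the lease expired.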
for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
DataNode datanode = cluster.getDataNode(datanodeinfo.getIpcPort());
ExtendedBlock blk = locatedblock.getBlock();
Block b = DataNodeTestUtils.getFSDataset(datanode).getStoredBlock(
blk.getBlockPoolId(), blk.getBlockId());
final File blockfile = DataNodeTestUtils.getFile(datanode,
blk.getBlockPoolId(), b.getBlockId());
System.out.println("blockfile=" + blockfile);
if (blockfile != null) {
BufferedReader in = new BufferedReader(new FileReader(blockfile));
assertEquals("something", in.readLine());
in.close();
successcount++;
}
}
System.out.println("successcount=" + successcount);
assertTrue(successcount > 0);
} finally {
IOUtils.closeStream(dfs);
cluster.shutdown();
}
System.out.println("testLeaseExpireHardLimit successful");
}
// test closing file system before all file handles are closed.
@Test
public void testFsClose() throws Exception {
System.out.println("test file system close start");
final int DATANODE_NUM = 3;
Configuration conf = new HdfsConfiguration();
// create cluster
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
DistributedFileSystem dfs = null;
try {
cluster.waitActive();
dfs = cluster.getFileSystem();
// create a new file.
final String f = DIR + "foofs";
final Path fpath = new Path(f);
FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
out.write("something".getBytes());
// close file system without closing file
dfs.close();
} finally {
System.out.println("testFsClose successful");
cluster.shutdown();
}
}
// test closing file after cluster is shutdown
@Test
public void testFsCloseAfterClusterShutdown() throws IOException {
System.out.println("test testFsCloseAfterClusterShutdown start");
final int DATANODE_NUM = 3;
Configuration conf = new HdfsConfiguration();
conf.setInt(DFS_NAMENODE_REPLICATION_MIN_KEY, 3);
conf.setBoolean("ipc.client.ping", false); // hdfs timeout is default 60 seconds
conf.setInt("ipc.ping.interval", 10000); // hdfs timeout is now 10 second
// create cluster
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
DistributedFileSystem dfs = null;
try {
cluster.waitActive();
dfs = cluster.getFileSystem();
// create a new file.
final String f = DIR + "testFsCloseAfterClusterShutdown";
final Path fpath = new Path(f);
FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
out.write("something_test".getBytes());
out.hflush(); // ensure that block is allocated
// shutdown last datanode in pipeline.
cluster.stopDataNode(2);
// close file. Since we have set minReplication to 3 but have killed one
// of the three datanodes, the close call will loop until the hdfsTimeout is
// encountered.
boolean hasException = false;
try {
out.close();
System.out.println("testFsCloseAfterClusterShutdown: Error here");
} catch (IOException e) {
hasException = true;
}
assertTrue("Failed to close file after cluster shutdown", hasException);
} finally {
System.out.println("testFsCloseAfterClusterShutdown successful");
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Regression test for HDFS-3626. Creates a file using a non-canonical path
* (i.e. with extra slashes between components) and makes sure that the NN
* can properly restart.
*
* This test RPCs directly to the NN, to ensure that even an old client
* which passes an invalid path won't cause corrupt edits.
*/
@Test
public void testCreateNonCanonicalPathAndRestartRpc() throws Exception {
doCreateTest(CreationMethod.DIRECT_NN_RPC);
}
/**
* Another regression test for HDFS-3626. This one creates files using
* a Path instantiated from a string object.
*/
@Test
public void testCreateNonCanonicalPathAndRestartFromString()
throws Exception {
doCreateTest(CreationMethod.PATH_FROM_STRING);
}
/**
* Another regression test for HDFS-3626. This one creates files using
* a Path instantiated from a URI object.
*/
@Test
public void testCreateNonCanonicalPathAndRestartFromUri()
throws Exception {
doCreateTest(CreationMethod.PATH_FROM_URI);
}
private static enum CreationMethod {
DIRECT_NN_RPC,
PATH_FROM_URI,
PATH_FROM_STRING
};
private void doCreateTest(CreationMethod method) throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1).build();
try {
FileSystem fs = cluster.getFileSystem();
NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
for (String pathStr : NON_CANONICAL_PATHS) {
System.out.println("Creating " + pathStr + " by " + method);
switch (method) {
case DIRECT_NN_RPC:
try {
nnrpc.create(pathStr, new FsPermission((short)0755), "client",
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
true, (short)1, 128*1024*1024L, null);
fail("Should have thrown exception when creating '"
+ pathStr + "'" + " by " + method);
} catch (InvalidPathException ipe) {
// When we create by direct NN RPC, the NN just rejects the
// non-canonical paths, rather than trying to normalize them.
// So, we expect all of them to fail.
}
break;
case PATH_FROM_URI:
case PATH_FROM_STRING:
// Unlike the above direct-to-NN case, we expect these to succeed,
// since the Path constructor should normalize the path.
Path p;
if (method == CreationMethod.PATH_FROM_URI) {
p = new Path(new URI(fs.getUri() + pathStr));
} else {
p = new Path(fs.getUri() + pathStr);
}
FSDataOutputStream stm = fs.create(p);
IOUtils.closeStream(stm);
break;
default:
throw new AssertionError("bad method: " + method);
}
}
cluster.restartNameNode();
} finally {
cluster.shutdown();
}
}
/**
* Test complete(..) - verifies that the fileId in the request
* matches that of the Inode.
* This test checks that FileNotFoundException exception is thrown in case
* the fileId does not match.
*/
@Test
public void testFileIdMismatch() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
DistributedFileSystem dfs = null;
try {
cluster.waitActive();
dfs = cluster.getFileSystem();
DFSClient client = dfs.dfs;
final Path f = new Path("/testFileIdMismatch.txt");
createFile(dfs, f, 3);
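// Use a fileId that cannot match the file's inode so that complete() is
// expected to fail with FileNotFoundException.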
long someOtherFileId = -1;
try {
cluster.getNameNodeRpc()
.complete(f.toString(), client.clientName, null, someOtherFileId);
fail();
} catch(FileNotFoundException e) {
FileSystem.LOG.info("Caught Expected FileNotFoundException: ", e);
}
} finally {
IOUtils.closeStream(dfs);
cluster.shutdown();
}
}
/**
* 1. Check the blocks of old file are cleaned after creating with overwrite
* 2. Restart NN, check the file
* 3. Save new checkpoint and restart NN, check the file
*/
@Test(timeout = 120000)
public void testFileCreationWithOverwrite() throws Exception {
Configuration conf = new Configuration();
conf.setInt("dfs.blocksize", blockSize);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
numDataNodes(3).build();
DistributedFileSystem dfs = cluster.getFileSystem();
try {
dfs.mkdirs(new Path("/foo/dir"));
String file = "/foo/dir/file";
Path filePath = new Path(file);
// Case 1: Create file with overwrite, check the blocks of old file
// are cleaned after creating with overwrite
NameNode nn = cluster.getNameNode();
FSNamesystem fsn = NameNodeAdapter.getNamesystem(nn);
BlockManager bm = fsn.getBlockManager();
FSDataOutputStream out = dfs.create(filePath);
byte[] oldData = AppendTestUtil.randomBytes(seed, fileSize);
try {
out.write(oldData);
} finally {
out.close();
}
LocatedBlocks oldBlocks = NameNodeAdapter.getBlockLocations(
nn, file, 0, fileSize);
assertBlocks(bm, oldBlocks, true);
out = dfs.create(filePath, true);
byte[] newData = AppendTestUtil.randomBytes(seed, fileSize);
try {
out.write(newData);
} finally {
out.close();
}
dfs.deleteOnExit(filePath);
LocatedBlocks newBlocks = NameNodeAdapter.getBlockLocations(
nn, file, 0, fileSize);
assertBlocks(bm, newBlocks, true);
assertBlocks(bm, oldBlocks, false);
FSDataInputStream in = dfs.open(filePath);
byte[] result = null;
try {
result = readAll(in);
} finally {
in.close();
}
Assert.assertArrayEquals(newData, result);
// Case 2: Restart NN, check the file
cluster.restartNameNode();
nn = cluster.getNameNode();
in = dfs.open(filePath);
try {
result = readAll(in);
} finally {
in.close();
}
Assert.assertArrayEquals(newData, result);
// Case 3: Save new checkpoint and restart NN, check the file
NameNodeAdapter.enterSafeMode(nn, false);
NameNodeAdapter.saveNamespace(nn);
cluster.restartNameNode();
nn = cluster.getNameNode();
in = dfs.open(filePath);
try {
result = readAll(in);
} finally {
in.close();
}
Assert.assertArrayEquals(newData, result);
} finally {
if (dfs != null) {
dfs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
private void assertBlocks(BlockManager bm, LocatedBlocks lbs,
boolean exist) {
for (LocatedBlock locatedBlock : lbs.getLocatedBlocks()) {
if (exist) {
assertTrue(bm.getStoredBlock(locatedBlock.getBlock().
getLocalBlock()) != null);
} else {
assertTrue(bm.getStoredBlock(locatedBlock.getBlock().
getLocalBlock()) == null);
}
}
}
private byte[] readAll(FSDataInputStream in) throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
byte[] buffer = new byte[1024];
int n = 0;
while((n = in.read(buffer)) > -1) {
out.write(buffer, 0, n);
}
return out.toByteArray();
}
}
| 48,983 | 35.609865 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.hamcrest.CoreMatchers.not;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
public class TestDFSUtil {
/**
* Reset to default UGI settings since some tests change them.
*/
@Before
public void resetUGI() {
UserGroupInformation.setConfiguration(new Configuration());
}
/**
* Test conversion of LocatedBlock to BlockLocation
*/
@Test
public void testLocatedBlocks2Locations() {
DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] ds = new DatanodeInfo[1];
ds[0] = d;
// ok
ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
LocatedBlock l1 = new LocatedBlock(b1, ds);
l1.setStartOffset(0);
l1.setCorrupt(false);
// corrupt
ExtendedBlock b2 = new ExtendedBlock("bpid", 2, 1, 1);
LocatedBlock l2 = new LocatedBlock(b2, ds);
l2.setStartOffset(0);
l2.setCorrupt(true);
List<LocatedBlock> ls = Arrays.asList(l1, l2);
LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null);
BlockLocation[] bs = DFSUtilClient.locatedBlocks2Locations(lbs);
assertTrue("expected 2 blocks but got " + bs.length,
bs.length == 2);
int corruptCount = 0;
for (BlockLocation b: bs) {
if (b.isCorrupt()) {
corruptCount++;
}
}
assertTrue("expected 1 corrupt files but got " + corruptCount,
corruptCount == 1);
// test an empty location
bs = DFSUtilClient.locatedBlocks2Locations(new LocatedBlocks());
assertEquals(0, bs.length);
}
/**
* Test constructing LocatedBlock with null cachedLocs
*/
@Test
public void testLocatedBlockConstructorWithNullCachedLocs() {
DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] ds = new DatanodeInfo[1];
ds[0] = d;
ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
LocatedBlock l1 = new LocatedBlock(b1, ds, null, null, 0, false, null);
final DatanodeInfo[] cachedLocs = l1.getCachedLocations();
assertTrue(cachedLocs.length == 0);
}
private Configuration setupAddress(String key) {
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFS_NAMESERVICES, "nn1");
conf.set(DFSUtil.addKeySuffixes(key, "nn1"), "localhost:9000");
return conf;
}
/**
* Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
* the nameserviceId set in the configuration is returned
*/
@Test
public void getNameServiceId() {
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFS_NAMESERVICE_ID, "nn1");
assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
}
/**
* Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
* nameserviceId for namenode is determined based on matching the address with
* local node's address
*/
@Test
public void getNameNodeNameServiceId() {
Configuration conf = setupAddress(DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
}
/**
* Test {@link DFSUtil#getBackupNameServiceId(Configuration)} to ensure
* nameserviceId for backup node is determined based on matching the address
* with local node's address
*/
@Test
public void getBackupNameServiceId() {
Configuration conf = setupAddress(DFS_NAMENODE_BACKUP_ADDRESS_KEY);
assertEquals("nn1", DFSUtil.getBackupNameServiceId(conf));
}
/**
* Test {@link DFSUtil#getSecondaryNameServiceId(Configuration)} to ensure
* the nameserviceId for the secondary namenode is determined by matching the
* address with the local node's address
*/
@Test
public void getSecondaryNameServiceId() {
Configuration conf = setupAddress(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
assertEquals("nn1", DFSUtil.getSecondaryNameServiceId(conf));
}
/**
* Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
* exception is thrown when multiple rpc addresses match the local node's
* address
*/
@Test(expected = HadoopIllegalArgumentException.class)
public void testGetNameServiceIdException() {
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFS_NAMESERVICES, "nn1,nn2");
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
"localhost:9000");
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
"localhost:9001");
DFSUtil.getNamenodeNameServiceId(conf);
fail("Expected exception is not thrown");
}
/**
* Test {@link DFSUtilClient#getNameServiceIds(Configuration)}
*/
@Test
public void testGetNameServiceIds() {
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFS_NAMESERVICES, "nn1,nn2");
Collection<String> nameserviceIds = DFSUtilClient.getNameServiceIds(conf);
Iterator<String> it = nameserviceIds.iterator();
assertEquals(2, nameserviceIds.size());
assertEquals("nn1", it.next().toString());
assertEquals("nn2", it.next().toString());
}
@Test
public void testGetOnlyNameServiceIdOrNull() {
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFS_NAMESERVICES, "ns1,ns2");
assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
conf.set(DFS_NAMESERVICES, "");
assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
conf.set(DFS_NAMESERVICES, "ns1");
assertEquals("ns1", DFSUtil.getOnlyNameServiceIdOrNull(conf));
}
/**
* Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)} and
* {@link DFSUtil#getNameServiceIdFromAddress(Configuration, InetSocketAddress, String...)}
*/
@Test
public void testMultipleNamenodes() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFS_NAMESERVICES, "nn1,nn2");
// Test - configured list of namenodes are returned
final String NN1_ADDRESS = "localhost:9000";
final String NN2_ADDRESS = "localhost:9001";
final String NN3_ADDRESS = "localhost:9002";
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
NN1_ADDRESS);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
NN2_ADDRESS);
Map<String, Map<String, InetSocketAddress>> nnMap = DFSUtil
.getNNServiceRpcAddresses(conf);
assertEquals(2, nnMap.size());
Map<String, InetSocketAddress> nn1Map = nnMap.get("nn1");
assertEquals(1, nn1Map.size());
InetSocketAddress addr = nn1Map.get(null);
assertEquals("localhost", addr.getHostName());
assertEquals(9000, addr.getPort());
Map<String, InetSocketAddress> nn2Map = nnMap.get("nn2");
assertEquals(1, nn2Map.size());
addr = nn2Map.get(null);
assertEquals("localhost", addr.getHostName());
assertEquals(9001, addr.getPort());
// Test - can look up nameservice ID from service address
checkNameServiceId(conf, NN1_ADDRESS, "nn1");
checkNameServiceId(conf, NN2_ADDRESS, "nn2");
checkNameServiceId(conf, NN3_ADDRESS, null);
// HA is not enabled in a purely federated config
assertFalse(HAUtil.isHAEnabled(conf, "nn1"));
assertFalse(HAUtil.isHAEnabled(conf, "nn2"));
}
public void checkNameServiceId(Configuration conf, String addr,
String expectedNameServiceId) {
InetSocketAddress s = NetUtils.createSocketAddr(addr);
String nameserviceId = DFSUtil.getNameServiceIdFromAddress(conf, s,
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(expectedNameServiceId, nameserviceId);
}
/** Tests to ensure default namenode is used as fallback */
@Test
public void testDefaultNamenode() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
final String hdfs_default = "hdfs://localhost:9999/";
conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
// If DFS_FEDERATION_NAMESERVICES is not set, verify that
// default namenode address is returned.
Map<String, Map<String, InetSocketAddress>> addrMap =
DFSUtil.getNNServiceRpcAddresses(conf);
assertEquals(1, addrMap.size());
Map<String, InetSocketAddress> defaultNsMap = addrMap.get(null);
assertEquals(1, defaultNsMap.size());
assertEquals(9999, defaultNsMap.get(null).getPort());
}
/**
* Test to ensure nameservice specific keys in the configuration are
* copied to generic keys when the namenode starts.
*/
@Test
public void testConfModificationFederationOnly() {
final HdfsConfiguration conf = new HdfsConfiguration();
String nsId = "ns1";
conf.set(DFS_NAMESERVICES, nsId);
conf.set(DFS_NAMESERVICE_ID, nsId);
// Set the nameservice specific keys with nameserviceId in the config key
for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
// Note: value is same as the key
conf.set(DFSUtil.addKeySuffixes(key, nsId), key);
}
// Initialize generic keys from specific keys
NameNode.initializeGenericKeys(conf, nsId, null);
// Retrieve the keys without nameserviceId and Ensure generic keys are set
// to the correct value
for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
assertEquals(key, conf.get(key));
}
}
/**
* Test to ensure nameservice specific keys in the configuration are
* copied to generic keys when the namenode starts.
*/
@Test
public void testConfModificationFederationAndHa() {
final HdfsConfiguration conf = new HdfsConfiguration();
String nsId = "ns1";
String nnId = "nn1";
conf.set(DFS_NAMESERVICES, nsId);
conf.set(DFS_NAMESERVICE_ID, nsId);
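    // List the namenode IDs that belong to this nameservice so the HA-suffixed
    // keys resolve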
conf.set(DFS_HA_NAMENODES_KEY_PREFIX + "." + nsId, nnId);
// Set the nameservice specific keys with nameserviceId in the config key
for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
// Note: value is same as the key
conf.set(DFSUtil.addKeySuffixes(key, nsId, nnId), key);
}
// Initialize generic keys from specific keys
NameNode.initializeGenericKeys(conf, nsId, nnId);
// Retrieve the keys without nameserviceId and Ensure generic keys are set
// to the correct value
for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
assertEquals(key, conf.get(key));
}
}
/**
* Ensure that fs.defaultFS is set in the configuration even if neither HA nor
* Federation is enabled.
*
* Regression test for HDFS-3351.
*/
@Test
public void testConfModificationNoFederationOrHa() {
final HdfsConfiguration conf = new HdfsConfiguration();
String nsId = null;
String nnId = null;
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1234");
assertFalse("hdfs://localhost:1234".equals(conf.get(FS_DEFAULT_NAME_KEY)));
NameNode.initializeGenericKeys(conf, nsId, nnId);
assertEquals("hdfs://localhost:1234", conf.get(FS_DEFAULT_NAME_KEY));
}
/**
* Regression test for HDFS-2934.
*/
@Test
public void testSomeConfsNNSpecificSomeNSSpecific() {
final HdfsConfiguration conf = new HdfsConfiguration();
String key = DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
conf.set(key, "global-default");
conf.set(key + ".ns1", "ns1-override");
conf.set(key + ".ns1.nn1", "nn1-override");
// A namenode in another nameservice should get the global default.
Configuration newConf = new Configuration(conf);
NameNode.initializeGenericKeys(newConf, "ns2", "nn1");
assertEquals("global-default", newConf.get(key));
// A namenode in another non-HA nameservice should get global default.
newConf = new Configuration(conf);
NameNode.initializeGenericKeys(newConf, "ns2", null);
assertEquals("global-default", newConf.get(key));
// A namenode in the same nameservice should get the ns setting
newConf = new Configuration(conf);
NameNode.initializeGenericKeys(newConf, "ns1", "nn2");
assertEquals("ns1-override", newConf.get(key));
// The nn with the nn-specific setting should get its own override
newConf = new Configuration(conf);
NameNode.initializeGenericKeys(newConf, "ns1", "nn1");
assertEquals("nn1-override", newConf.get(key));
}
/**
   * Tests that, for an empty configuration, an exception is thrown from
   * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)},
   * {@link DFSUtil#getBackupNodeAddresses(Configuration)} and
   * {@link DFSUtil#getSecondaryNameNodeAddresses(Configuration)}.
*/
@Test
public void testEmptyConf() {
HdfsConfiguration conf = new HdfsConfiguration(false);
try {
Map<String, Map<String, InetSocketAddress>> map =
DFSUtil.getNNServiceRpcAddresses(conf);
fail("Expected IOException is not thrown, result was: " +
DFSUtil.addressMapToString(map));
} catch (IOException expected) {
/** Expected */
}
try {
Map<String, Map<String, InetSocketAddress>> map =
DFSUtil.getBackupNodeAddresses(conf);
fail("Expected IOException is not thrown, result was: " +
DFSUtil.addressMapToString(map));
} catch (IOException expected) {
/** Expected */
}
try {
Map<String, Map<String, InetSocketAddress>> map =
DFSUtil.getSecondaryNameNodeAddresses(conf);
fail("Expected IOException is not thrown, result was: " +
DFSUtil.addressMapToString(map));
} catch (IOException expected) {
/** Expected */
}
}
@Test
public void testGetInfoServer() throws IOException, URISyntaxException {
HdfsConfiguration conf = new HdfsConfiguration();
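    // With no NN address supplied, getInfoServer falls back to the wildcard host
    // and the default port for the requested scheme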
URI httpsport = DFSUtil.getInfoServer(null, conf, "https");
assertEquals(new URI("https", null, "0.0.0.0",
DFS_NAMENODE_HTTPS_PORT_DEFAULT, null, null, null), httpsport);
URI httpport = DFSUtil.getInfoServer(null, conf, "http");
assertEquals(new URI("http", null, "0.0.0.0",
DFS_NAMENODE_HTTP_PORT_DEFAULT, null, null, null), httpport);
URI httpAddress = DFSUtil.getInfoServer(new InetSocketAddress(
"localhost", 8020), conf, "http");
assertEquals(
URI.create("http://localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT),
httpAddress);
}
@Test
public void testHANameNodesWithFederation() throws URISyntaxException {
HdfsConfiguration conf = new HdfsConfiguration();
final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
final String NS1_NN2_HOST = "ns1-nn2.example.com:8020";
final String NS2_NN1_HOST = "ns2-nn1.example.com:8020";
final String NS2_NN2_HOST = "ns2-nn2.example.com:8020";
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
// Two nameservices, each with two NNs.
conf.set(DFS_NAMESERVICES, "ns1,ns2");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),
"ns1-nn1,ns1-nn2");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns2"),
"ns2-nn1,ns2-nn2");
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "ns1-nn1"),
NS1_NN1_HOST);
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "ns1-nn2"),
NS1_NN2_HOST);
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn1"),
NS2_NN1_HOST);
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn2"),
NS2_NN2_HOST);
Map<String, Map<String, InetSocketAddress>> map =
DFSUtil.getHaNnRpcAddresses(conf);
assertTrue(HAUtil.isHAEnabled(conf, "ns1"));
assertTrue(HAUtil.isHAEnabled(conf, "ns2"));
assertFalse(HAUtil.isHAEnabled(conf, "ns3"));
assertEquals(NS1_NN1_HOST, map.get("ns1").get("ns1-nn1").toString());
assertEquals(NS1_NN2_HOST, map.get("ns1").get("ns1-nn2").toString());
assertEquals(NS2_NN1_HOST, map.get("ns2").get("ns2-nn1").toString());
assertEquals(NS2_NN2_HOST, map.get("ns2").get("ns2-nn2").toString());
assertEquals(NS1_NN1_HOST,
DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn1"));
assertEquals(NS1_NN2_HOST,
DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn2"));
assertEquals(NS2_NN1_HOST,
DFSUtil.getNamenodeServiceAddr(conf, "ns2", "ns2-nn1"));
// No nameservice was given and we can't determine which service addr
// to use as two nameservices could share a namenode ID.
assertEquals(null, DFSUtil.getNamenodeServiceAddr(conf, null, "ns1-nn1"));
// Ditto for nameservice IDs, if multiple are defined
assertEquals(null, DFSUtil.getNamenodeNameServiceId(conf));
assertEquals(null, DFSUtil.getSecondaryNameServiceId(conf));
Collection<URI> uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(2, uris.size());
assertTrue(uris.contains(new URI("hdfs://ns1")));
assertTrue(uris.contains(new URI("hdfs://ns2")));
}
@Test
public void getNameNodeServiceAddr() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
// One nameservice with two NNs
final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
final String NS1_NN1_HOST_SVC = "ns1-nn2.example.com:8021";
final String NS1_NN2_HOST = "ns1-nn1.example.com:8020";
final String NS1_NN2_HOST_SVC = "ns1-nn2.example.com:8021";
conf.set(DFS_NAMESERVICES, "ns1");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_HOST);
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_HOST);
// The rpc address is used if no service address is defined
assertEquals(NS1_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, null, "nn1"));
assertEquals(NS1_NN2_HOST, DFSUtil.getNamenodeServiceAddr(conf, null, "nn2"));
// A nameservice is specified explicitly
assertEquals(NS1_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, "ns1", "nn1"));
assertEquals(null, DFSUtil.getNamenodeServiceAddr(conf, "invalid", "nn1"));
// The service addrs are used when they are defined
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_HOST_SVC);
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_HOST_SVC);
assertEquals(NS1_NN1_HOST_SVC, DFSUtil.getNamenodeServiceAddr(conf, null, "nn1"));
assertEquals(NS1_NN2_HOST_SVC, DFSUtil.getNamenodeServiceAddr(conf, null, "nn2"));
// We can determine the nameservice ID, there's only one listed
assertEquals("ns1", DFSUtil.getNamenodeNameServiceId(conf));
assertEquals("ns1", DFSUtil.getSecondaryNameServiceId(conf));
}
@Test
public void testGetHaNnHttpAddresses() throws IOException {
final String LOGICAL_HOST_NAME = "ns1";
final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
Map<String, Map<String, InetSocketAddress>> map =
DFSUtilClient.getHaNnWebHdfsAddresses(conf, "webhdfs");
assertEquals(NS1_NN1_ADDR, map.get("ns1").get("nn1").toString());
assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString());
}
private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFS_NAMESERVICES, "ns1");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"), nnaddr1);
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn2"), nnaddr2);
conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + logicalHostName,
ConfiguredFailoverProxyProvider.class.getName());
return conf;
}
@Test
public void testSubstituteForWildcardAddress() throws IOException {
assertEquals("foo:12345",
DFSUtil.substituteForWildcardAddress("0.0.0.0:12345", "foo"));
assertEquals("127.0.0.1:12345",
DFSUtil.substituteForWildcardAddress("127.0.0.1:12345", "foo"));
}
@Test
public void testGetNNUris() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
final String NS2_NN_ADDR = "ns2-nn.example.com:8020";
final String NN1_ADDR = "nn.example.com:8020";
final String NN1_SRVC_ADDR = "nn.example.com:8021";
final String NN2_ADDR = "nn2.example.com:8020";
conf.set(DFS_NAMESERVICES, "ns1,ns2");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_ADDR);
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_ADDR);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns2"),
NS2_NN_ADDR);
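    // Also configure a non-federated NN through the generic RPC address key and
    // another one through fs.defaultFS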
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "hdfs://" + NN1_ADDR);
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN2_ADDR);
Collection<URI> uris = DFSUtil.getNameServiceUris(conf,
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(4, uris.size());
assertTrue(uris.contains(new URI("hdfs://ns1")));
assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
assertTrue(uris.contains(new URI("hdfs://" + NN2_ADDR)));
// Make sure that non-HDFS URIs in fs.defaultFS don't get included.
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
"viewfs://vfs-name.example.com");
uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(3, uris.size());
assertTrue(uris.contains(new URI("hdfs://ns1")));
assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
// Make sure that an HA URI being the default URI doesn't result in multiple
// entries being returned.
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(3, uris.size());
assertTrue(uris.contains(new URI("hdfs://ns1")));
assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
// Make sure that when a service RPC address is used that is distinct from
// the client RPC address, and that client RPC address is also used as the
// default URI, that the client URI does not end up in the set of URIs
// returned.
conf = new HdfsConfiguration();
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR);
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, NN1_ADDR);
conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_SRVC_ADDR);
uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(1, uris.size());
assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
}
@Test (timeout=15000)
public void testLocalhostReverseLookup() {
// 127.0.0.1 -> localhost reverse resolution does not happen on Windows.
Assume.assumeTrue(!Shell.WINDOWS);
// Make sure when config FS_DEFAULT_NAME_KEY using IP address,
// it will automatically convert it to hostname
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
Collection<URI> uris = DFSUtil.getNameServiceUris(conf);
assertEquals(1, uris.size());
for (URI uri : uris) {
assertThat(uri.getHost(), not("127.0.0.1"));
}
}
@Test (timeout=15000)
public void testIsValidName() {
assertFalse(DFSUtil.isValidName("/foo/../bar"));
assertFalse(DFSUtil.isValidName("/foo/./bar"));
assertFalse(DFSUtil.isValidName("/foo//bar"));
assertTrue(DFSUtil.isValidName("/"));
assertTrue(DFSUtil.isValidName("/bar/"));
assertFalse(DFSUtil.isValidName("/foo/:/bar"));
assertFalse(DFSUtil.isValidName("/foo:bar"));
}
@Test(timeout=5000)
public void testGetSpnegoKeytabKey() {
HdfsConfiguration conf = new HdfsConfiguration();
String defaultKey = "default.spengo.key";
conf.unset(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
assertEquals("Test spnego key in config is null", defaultKey,
DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, "");
assertEquals("Test spnego key is empty", defaultKey,
DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
String spengoKey = "spengo.key";
conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
spengoKey);
assertEquals("Test spnego key is NOT null",
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
}
@Test(timeout=1000)
public void testDurationToString() throws Exception {
assertEquals("000:00:00:00.000", DFSUtil.durationToString(0));
assertEquals("001:01:01:01.000",
DFSUtil.durationToString(((24*60*60)+(60*60)+(60)+1)*1000));
assertEquals("000:23:59:59.999",
DFSUtil.durationToString(((23*60*60)+(59*60)+(59))*1000+999));
assertEquals("-001:01:01:01.000",
DFSUtil.durationToString(-((24*60*60)+(60*60)+(60)+1)*1000));
assertEquals("-000:23:59:59.574",
DFSUtil.durationToString(-(((23*60*60)+(59*60)+(59))*1000+574)));
}
@Test(timeout=5000)
public void testRelativeTimeConversion() throws Exception {
try {
DFSUtil.parseRelativeTime("1");
} catch (IOException e) {
assertExceptionContains("too short", e);
}
try {
DFSUtil.parseRelativeTime("1z");
} catch (IOException e) {
assertExceptionContains("unknown time unit", e);
}
try {
DFSUtil.parseRelativeTime("yyz");
} catch (IOException e) {
assertExceptionContains("is not a number", e);
}
assertEquals(61*1000, DFSUtil.parseRelativeTime("61s"));
assertEquals(61*60*1000, DFSUtil.parseRelativeTime("61m"));
assertEquals(0, DFSUtil.parseRelativeTime("0s"));
assertEquals(25*60*60*1000, DFSUtil.parseRelativeTime("25h"));
assertEquals(4*24*60*60*1000l, DFSUtil.parseRelativeTime("4d"));
assertEquals(999*24*60*60*1000l, DFSUtil.parseRelativeTime("999d"));
}
@Test
public void testAssertAllResultsEqual() {
checkAllResults(new Long[]{}, true);
checkAllResults(new Long[]{1l}, true);
checkAllResults(new Long[]{1l, 1l}, true);
checkAllResults(new Long[]{1l, 1l, 1l}, true);
checkAllResults(new Long[]{new Long(1), new Long(1)}, true);
checkAllResults(new Long[]{null, null, null}, true);
checkAllResults(new Long[]{1l, 2l}, false);
checkAllResults(new Long[]{2l, 1l}, false);
checkAllResults(new Long[]{1l, 2l, 1l}, false);
checkAllResults(new Long[]{2l, 1l, 1l}, false);
checkAllResults(new Long[]{1l, 1l, 2l}, false);
checkAllResults(new Long[]{1l, null}, false);
checkAllResults(new Long[]{null, 1l}, false);
checkAllResults(new Long[]{1l, null, 1l}, false);
}
private static void checkAllResults(Long[] toCheck, boolean shouldSucceed) {
if (shouldSucceed) {
DFSUtil.assertAllResultsEqual(Arrays.asList(toCheck));
} else {
try {
DFSUtil.assertAllResultsEqual(Arrays.asList(toCheck));
fail("Should not have succeeded with input: " +
Arrays.toString(toCheck));
} catch (AssertionError ae) {
GenericTestUtils.assertExceptionContains("Not all elements match", ae);
}
}
}
@Test
public void testGetPassword() throws Exception {
File testDir = new File(System.getProperty("test.build.data",
"target/test-dir"));
Configuration conf = new Configuration();
final Path jksPath = new Path(testDir.toString(), "test.jks");
final String ourUrl =
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
File file = new File(testDir, "test.jks");
file.delete();
conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
CredentialProvider provider =
CredentialProviderFactory.getProviders(conf).get(0);
char[] keypass = {'k', 'e', 'y', 'p', 'a', 's', 's'};
char[] storepass = {'s', 't', 'o', 'r', 'e', 'p', 'a', 's', 's'};
char[] trustpass = {'t', 'r', 'u', 's', 't', 'p', 'a', 's', 's'};
// ensure that we get nulls when the key isn't there
assertEquals(null, provider.getCredentialEntry(
DFS_SERVER_HTTPS_KEYPASSWORD_KEY));
assertEquals(null, provider.getCredentialEntry(
DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY));
assertEquals(null, provider.getCredentialEntry(
DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY));
// create new aliases
try {
provider.createCredentialEntry(
DFS_SERVER_HTTPS_KEYPASSWORD_KEY, keypass);
provider.createCredentialEntry(
DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY, storepass);
provider.createCredentialEntry(
DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY, trustpass);
// write out so that it can be found in checks
provider.flush();
} catch (Exception e) {
e.printStackTrace();
throw e;
}
// make sure we get back the right key directly from api
assertArrayEquals(keypass, provider.getCredentialEntry(
DFS_SERVER_HTTPS_KEYPASSWORD_KEY).getCredential());
assertArrayEquals(storepass, provider.getCredentialEntry(
DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY).getCredential());
assertArrayEquals(trustpass, provider.getCredentialEntry(
DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY).getCredential());
    // make sure DFSUtil.getPassword resolves the passwords through the credential provider
Assert.assertEquals("keypass",
DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_KEYPASSWORD_KEY));
Assert.assertEquals("storepass",
DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY));
Assert.assertEquals("trustpass",
DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY));
// let's make sure that a password that doesn't exist returns null
Assert.assertEquals(null, DFSUtil.getPassword(conf,"invalid-alias"));
}
@Test
public void testGetNNServiceRpcAddressesForNsIds() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.set(DFS_NAMESERVICES, "nn1,nn2");
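    // Restrict the cluster-internal nameservices to nn1 only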
conf.set(DFS_INTERNAL_NAMESERVICES_KEY, "nn1");
// Test - configured list of namenodes are returned
final String NN1_ADDRESS = "localhost:9000";
final String NN2_ADDRESS = "localhost:9001";
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
NN1_ADDRESS);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
NN2_ADDRESS);
Map<String, Map<String, InetSocketAddress>> nnMap = DFSUtil
.getNNServiceRpcAddressesForCluster(conf);
assertEquals(1, nnMap.size());
assertTrue(nnMap.containsKey("nn1"));
conf.set(DFS_INTERNAL_NAMESERVICES_KEY, "nn3");
try {
DFSUtil.getNNServiceRpcAddressesForCluster(conf);
fail("Should fail for misconfiguration");
} catch (IOException ignored) {
}
}
@Test
public void testEncryptionProbe() throws Throwable {
Configuration conf = new Configuration(false);
conf.unset(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI);
assertFalse("encryption enabled on no provider key",
DFSUtil.isHDFSEncryptionEnabled(conf));
conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "");
assertFalse("encryption enabled on empty provider key",
DFSUtil.isHDFSEncryptionEnabled(conf));
conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "\n\t\n");
assertFalse("encryption enabled on whitespace provider key",
DFSUtil.isHDFSEncryptionEnabled(conf));
conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "http://hadoop.apache.org");
assertTrue("encryption disabled on valid provider key",
DFSUtil.isHDFSEncryptionEnabled(conf));
}
}
| 36,405 | 38.614799 | 117 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Test;
/**
* This class tests the building blocks that are needed to
* support HDFS appends.
*/
public class TestFileAppend2 {
{
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
}
static final int numBlocks = 5;
final boolean simulatedStorage = false;
private byte[] fileContents = null;
final int numDatanodes = 6;
final int numberOfFiles = 50;
final int numThreads = 10;
final int numAppendsPerThread = 20;
Workload[] workload = null;
final ArrayList<Path> testFiles = new ArrayList<Path>();
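  // set to false by any worker thread that hits an error; checked after all threads join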
volatile static boolean globalStatus = true;
/**
   * Creates one file, writes a few bytes to it and then closes it.
   * Reopens the same file for appending, writes all blocks and then closes it.
   * Verifies that all data exists in the file.
* @throws IOException an exception might be thrown
*/
@Test
public void testSimpleAppend() throws IOException {
final Configuration conf = new HdfsConfiguration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
try {
{ // test appending to a file.
// create a new file.
Path file1 = new Path("/simpleAppend.dat");
FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
System.out.println("Created file simpleAppend.dat");
// write to file
int mid = 186; // io.bytes.per.checksum bytes
System.out.println("Writing " + mid + " bytes to file " + file1);
stm.write(fileContents, 0, mid);
stm.close();
System.out.println("Wrote and Closed first part of file.");
// write to file
int mid2 = 607; // io.bytes.per.checksum bytes
System.out.println("Writing " + mid + " bytes to file " + file1);
stm = fs.append(file1);
stm.write(fileContents, mid, mid2-mid);
stm.close();
System.out.println("Wrote and Closed second part of file.");
// write the remainder of the file
stm = fs.append(file1);
// ensure getPos is set to reflect existing size of the file
assertTrue(stm.getPos() > 0);
System.out.println("Writing " + (AppendTestUtil.FILE_SIZE - mid2) +
" bytes to file " + file1);
stm.write(fileContents, mid2, AppendTestUtil.FILE_SIZE - mid2);
System.out.println("Written second part of file");
stm.close();
System.out.println("Wrote and Closed second part of file.");
// verify that entire file is good
AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
fileContents, "Read 2");
}
      { // test appending to a non-existing file.
FSDataOutputStream out = null;
try {
out = fs.append(new Path("/non-existing.dat"));
fail("Expected to have FileNotFoundException");
}
catch(java.io.FileNotFoundException fnfe) {
System.out.println("Good: got " + fnfe);
fnfe.printStackTrace(System.out);
}
finally {
IOUtils.closeStream(out);
}
}
{ // test append permission.
//set root to all writable
Path root = new Path("/");
fs.setPermission(root, new FsPermission((short)0777));
fs.close();
// login as a different user
final UserGroupInformation superuser =
UserGroupInformation.getCurrentUser();
String username = "testappenduser";
String group = "testappendgroup";
assertFalse(superuser.getShortUserName().equals(username));
assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
UserGroupInformation appenduser =
UserGroupInformation.createUserForTesting(username, new String[]{group});
fs = DFSTestUtil.getFileSystemAs(appenduser, conf);
// create a file
Path dir = new Path(root, getClass().getSimpleName());
Path foo = new Path(dir, "foo.dat");
FSDataOutputStream out = null;
int offset = 0;
try {
out = fs.create(foo);
int len = 10 + AppendTestUtil.nextInt(100);
out.write(fileContents, offset, len);
offset += len;
}
finally {
IOUtils.closeStream(out);
}
// change dir and foo to minimal permissions.
fs.setPermission(dir, new FsPermission((short)0100));
fs.setPermission(foo, new FsPermission((short)0200));
        // try append, should succeed
out = null;
try {
out = fs.append(foo);
int len = 10 + AppendTestUtil.nextInt(100);
out.write(fileContents, offset, len);
offset += len;
}
finally {
IOUtils.closeStream(out);
}
// change dir and foo to all but no write on foo.
fs.setPermission(foo, new FsPermission((short)0577));
fs.setPermission(dir, new FsPermission((short)0777));
// try append, should fail
out = null;
try {
out = fs.append(foo);
fail("Expected to have AccessControlException");
}
catch(AccessControlException ace) {
System.out.println("Good: got " + ace);
ace.printStackTrace(System.out);
}
finally {
IOUtils.closeStream(out);
}
}
} catch (IOException e) {
System.out.println("Exception :" + e);
throw e;
} catch (Throwable e) {
System.out.println("Throwable :" + e);
e.printStackTrace();
throw new IOException("Throwable : " + e);
} finally {
fs.close();
cluster.shutdown();
}
}
/**
   * Creates one file, writes a few bytes to it and then closes it.
   * Reopens the same file for appending using the append2 API, writes all blocks
   * and then closes it. Verifies that all data exists in the file.
*/
@Test
public void testSimpleAppend2() throws Exception {
final Configuration conf = new HdfsConfiguration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
DistributedFileSystem fs = cluster.getFileSystem();
try {
{ // test appending to a file.
// create a new file.
Path file1 = new Path("/simpleAppend.dat");
FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
System.out.println("Created file simpleAppend.dat");
// write to file
int mid = 186; // io.bytes.per.checksum bytes
System.out.println("Writing " + mid + " bytes to file " + file1);
stm.write(fileContents, 0, mid);
stm.close();
System.out.println("Wrote and Closed first part of file.");
// write to file
int mid2 = 607; // io.bytes.per.checksum bytes
System.out.println("Writing " + mid + " bytes to file " + file1);
stm = fs.append(file1,
EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
stm.write(fileContents, mid, mid2-mid);
stm.close();
System.out.println("Wrote and Closed second part of file.");
// write the remainder of the file
stm = fs.append(file1,
EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
// ensure getPos is set to reflect existing size of the file
assertTrue(stm.getPos() > 0);
System.out.println("Writing " + (AppendTestUtil.FILE_SIZE - mid2) +
" bytes to file " + file1);
stm.write(fileContents, mid2, AppendTestUtil.FILE_SIZE - mid2);
System.out.println("Written second part of file");
stm.close();
System.out.println("Wrote and Closed second part of file.");
// verify that entire file is good
AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
fileContents, "Read 2");
        // also make sure the file is split across the expected blocks with the
        // expected sizes
List<LocatedBlock> blocks = fs.getClient().getLocatedBlocks(
file1.toString(), 0L).getLocatedBlocks();
assertEquals(12, blocks.size()); // the block size is 1024
assertEquals(mid, blocks.get(0).getBlockSize());
assertEquals(mid2 - mid, blocks.get(1).getBlockSize());
for (int i = 2; i < 11; i++) {
assertEquals(AppendTestUtil.BLOCK_SIZE, blocks.get(i).getBlockSize());
}
assertEquals((AppendTestUtil.FILE_SIZE - mid2)
% AppendTestUtil.BLOCK_SIZE, blocks.get(11).getBlockSize());
}
      { // test appending to a non-existing file.
FSDataOutputStream out = null;
try {
out = fs.append(new Path("/non-existing.dat"),
EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
fail("Expected to have FileNotFoundException");
} catch(java.io.FileNotFoundException fnfe) {
System.out.println("Good: got " + fnfe);
fnfe.printStackTrace(System.out);
} finally {
IOUtils.closeStream(out);
}
}
{ // test append permission.
// set root to all writable
Path root = new Path("/");
fs.setPermission(root, new FsPermission((short)0777));
fs.close();
// login as a different user
final UserGroupInformation superuser =
UserGroupInformation.getCurrentUser();
String username = "testappenduser";
String group = "testappendgroup";
assertFalse(superuser.getShortUserName().equals(username));
assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
UserGroupInformation appenduser = UserGroupInformation
.createUserForTesting(username, new String[] { group });
fs = (DistributedFileSystem) DFSTestUtil.getFileSystemAs(appenduser,
conf);
// create a file
Path dir = new Path(root, getClass().getSimpleName());
Path foo = new Path(dir, "foo.dat");
FSDataOutputStream out = null;
int offset = 0;
try {
out = fs.create(foo);
int len = 10 + AppendTestUtil.nextInt(100);
out.write(fileContents, offset, len);
offset += len;
} finally {
IOUtils.closeStream(out);
}
// change dir and foo to minimal permissions.
fs.setPermission(dir, new FsPermission((short)0100));
fs.setPermission(foo, new FsPermission((short)0200));
        // try append, should succeed
out = null;
try {
out = fs.append(foo,
EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
int len = 10 + AppendTestUtil.nextInt(100);
out.write(fileContents, offset, len);
offset += len;
} finally {
IOUtils.closeStream(out);
}
// change dir and foo to all but no write on foo.
fs.setPermission(foo, new FsPermission((short)0577));
fs.setPermission(dir, new FsPermission((short)0777));
// try append, should fail
out = null;
try {
out = fs.append(foo,
EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
fail("Expected to have AccessControlException");
} catch(AccessControlException ace) {
System.out.println("Good: got " + ace);
ace.printStackTrace(System.out);
} finally {
IOUtils.closeStream(out);
}
}
} finally {
fs.close();
cluster.shutdown();
}
}
//
// an object that does a bunch of appends to files
//
class Workload extends Thread {
private final int id;
private final MiniDFSCluster cluster;
private final boolean appendToNewBlock;
Workload(MiniDFSCluster cluster, int threadIndex, boolean append2) {
id = threadIndex;
this.cluster = cluster;
this.appendToNewBlock = append2;
}
// create a bunch of files. Write to them and then verify.
@Override
public void run() {
System.out.println("Workload " + id + " starting... ");
for (int i = 0; i < numAppendsPerThread; i++) {
// pick a file at random and remove it from pool
Path testfile;
synchronized (testFiles) {
if (testFiles.size() == 0) {
System.out.println("Completed write to almost all files.");
return;
}
int index = AppendTestUtil.nextInt(testFiles.size());
testfile = testFiles.remove(index);
}
long len = 0;
int sizeToAppend = 0;
try {
DistributedFileSystem fs = cluster.getFileSystem();
// add a random number of bytes to file
len = fs.getFileStatus(testfile).getLen();
// if file is already full, then pick another file
if (len >= AppendTestUtil.FILE_SIZE) {
System.out.println("File " + testfile + " is full.");
continue;
}
// do small size appends so that we can trigger multiple
// appends to the same file.
//
int left = (int)(AppendTestUtil.FILE_SIZE - len)/3;
if (left <= 0) {
left = 1;
}
sizeToAppend = AppendTestUtil.nextInt(left);
System.out.println("Workload thread " + id +
" appending " + sizeToAppend + " bytes " +
" to file " + testfile +
" of size " + len);
FSDataOutputStream stm = appendToNewBlock ? fs.append(testfile,
EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null)
: fs.append(testfile);
stm.write(fileContents, (int)len, sizeToAppend);
stm.close();
// wait for the file size to be reflected in the namenode metadata
while (fs.getFileStatus(testfile).getLen() != (len + sizeToAppend)) {
try {
System.out.println("Workload thread " + id +
" file " + testfile +
" size " + fs.getFileStatus(testfile).getLen() +
" expected size " + (len + sizeToAppend) +
" waiting for namenode metadata update.");
Thread.sleep(5000);
} catch (InterruptedException e) {}
}
assertTrue("File " + testfile + " size is " +
fs.getFileStatus(testfile).getLen() +
" but expected " + (len + sizeToAppend),
fs.getFileStatus(testfile).getLen() == (len + sizeToAppend));
AppendTestUtil.checkFullFile(fs, testfile, (int) (len + sizeToAppend),
fileContents, "Read 2");
} catch (Throwable e) {
globalStatus = false;
if (e.toString() != null) {
System.out.println("Workload exception " + id +
" testfile " + testfile +
" " + e);
e.printStackTrace();
}
assertTrue("Workload exception " + id + " testfile " + testfile +
" expected size " + (len + sizeToAppend),
false);
}
// Add testfile back to the pool of files.
synchronized (testFiles) {
testFiles.add(testfile);
}
}
}
}
/**
   * Drives multiple threads that concurrently append random-sized chunks to a
   * shared pool of files and verifies the resulting contents.
*/
private void testComplexAppend(boolean appendToNewBlock) throws IOException {
fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
Configuration conf = new HdfsConfiguration();
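    // Shorten heartbeat and re-replication intervals and raise the datanode
    // handler count so the mini cluster keeps up with many concurrent appenders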
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 2);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 30000);
conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, 30000);
conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDatanodes)
.build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
try {
// create a bunch of test files with random replication factors.
// Insert them into a linked list.
//
for (int i = 0; i < numberOfFiles; i++) {
final int replication = AppendTestUtil.nextInt(numDatanodes - 2) + 1;
Path testFile = new Path("/" + i + ".dat");
FSDataOutputStream stm =
AppendTestUtil.createFile(fs, testFile, replication);
stm.close();
testFiles.add(testFile);
}
// Create threads and make them run workload concurrently.
workload = new Workload[numThreads];
for (int i = 0; i < numThreads; i++) {
workload[i] = new Workload(cluster, i, appendToNewBlock);
workload[i].start();
}
// wait for all transactions to get over
for (int i = 0; i < numThreads; i++) {
try {
System.out.println("Waiting for thread " + i + " to complete...");
workload[i].join();
System.out.println("Waiting for thread " + i + " complete.");
} catch (InterruptedException e) {
i--; // retry
}
}
} finally {
fs.close();
cluster.shutdown();
}
// If any of the worker thread failed in their job, indicate that
// this test failed.
//
assertTrue("testComplexAppend Worker encountered exceptions.", globalStatus);
}
@Test
public void testComplexAppend() throws IOException {
testComplexAppend(false);
}
@Test
public void testComplexAppend2() throws IOException {
testComplexAppend(true);
}
}
| 20,300 | 35.710669 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Test;
public class TestDFSShellGenericOptions {
@Test
public void testDFSCommand() throws IOException {
String namenode = null;
MiniDFSCluster cluster = null;
try {
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).build();
namenode = FileSystem.getDefaultUri(conf).toString();
String [] args = new String[4];
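      // args[0] and args[1] are filled in by each helper with the generic option under test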
args[2] = "-mkdir";
args[3] = "/data";
testFsOption(args, namenode);
testConfOption(args, namenode);
testPropertyOption(args, namenode);
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
private void testFsOption(String [] args, String namenode) {
// prepare arguments to create a directory /data
args[0] = "-fs";
args[1] = namenode;
execute(args, namenode);
}
private void testConfOption(String[] args, String namenode) {
// prepare configuration hdfs-site.xml
File configDir = new File(new File("build", "test"), "minidfs");
assertTrue(configDir.mkdirs());
File siteFile = new File(configDir, "hdfs-site.xml");
PrintWriter pw;
try {
pw = new PrintWriter(siteFile);
pw.print("<?xml version=\"1.0\"?>\n"+
"<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>\n"+
"<configuration>\n"+
" <property>\n"+
" <name>fs.defaultFS</name>\n"+
" <value>"+namenode+"</value>\n"+
" </property>\n"+
"</configuration>\n");
pw.close();
// prepare arguments to create a directory /data
args[0] = "-conf";
args[1] = siteFile.getPath();
execute(args, namenode);
} catch (FileNotFoundException e) {
e.printStackTrace();
} finally {
siteFile.delete();
configDir.delete();
}
}
private void testPropertyOption(String[] args, String namenode) {
// prepare arguments to create a directory /data
args[0] = "-D";
args[1] = "fs.defaultFS="+namenode;
execute(args, namenode);
}
private void execute(String [] args, String namenode) {
FsShell shell=new FsShell();
FileSystem fs=null;
try {
ToolRunner.run(shell, args);
fs = FileSystem.get(NameNode.getUri(NameNode.getAddress(namenode)),
shell.getConf());
assertTrue("Directory does not get created",
fs.isDirectory(new Path("/data")));
fs.delete(new Path("/data"), true);
} catch (Exception e) {
System.err.println(e.getMessage());
e.printStackTrace();
} finally {
if (fs!=null) {
try {
fs.close();
} catch (IOException ignored) {
}
}
}
}
}
| 4,017 | 31.403226 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Level;
/**
* This class benchmarks the performance of the local file system, raw local
* file system and HDFS at reading and writing files. The user should invoke
* the main of this class and optionally include a repetition count.
*/
public class BenchmarkThroughput extends Configured implements Tool {
// the property in the config that specifies a working directory
private LocalDirAllocator dir;
private long startTime;
// the size of the buffer to use
private int BUFFER_SIZE;
private void resetMeasurements() {
startTime = Time.now();
}
private void printMeasurements() {
System.out.println(" time: " +
((Time.now() - startTime)/1000));
}
private Path writeLocalFile(String name, Configuration conf,
long total) throws IOException {
Path path = dir.getLocalPathForWrite(name, total, conf);
System.out.print("Writing " + name);
resetMeasurements();
OutputStream out = new FileOutputStream(new File(path.toString()));
byte[] data = new byte[BUFFER_SIZE];
for(long size=0; size < total; size += BUFFER_SIZE) {
out.write(data);
}
out.close();
printMeasurements();
return path;
}
private void readLocalFile(Path path,
String name,
Configuration conf) throws IOException {
System.out.print("Reading " + name);
resetMeasurements();
InputStream in = new FileInputStream(new File(path.toString()));
byte[] data = new byte[BUFFER_SIZE];
long size = 0;
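    // read until EOF (read() returns -1)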
while (size >= 0) {
size = in.read(data);
}
in.close();
printMeasurements();
}
private void writeAndReadLocalFile(String name,
Configuration conf,
long size
) throws IOException {
Path f = null;
try {
f = writeLocalFile(name, conf, size);
readLocalFile(f, name, conf);
} finally {
if (f != null) {
new File(f.toString()).delete();
}
}
}
private Path writeFile(FileSystem fs,
String name,
Configuration conf,
long total
) throws IOException {
Path f = dir.getLocalPathForWrite(name, total, conf);
System.out.print("Writing " + name);
resetMeasurements();
OutputStream out = fs.create(f);
byte[] data = new byte[BUFFER_SIZE];
for(long size = 0; size < total; size += BUFFER_SIZE) {
out.write(data);
}
out.close();
printMeasurements();
return f;
}
private void readFile(FileSystem fs,
Path f,
String name,
Configuration conf
) throws IOException {
System.out.print("Reading " + name);
resetMeasurements();
InputStream in = fs.open(f);
byte[] data = new byte[BUFFER_SIZE];
long val = 0;
while (val >= 0) {
val = in.read(data);
}
in.close();
printMeasurements();
}
private void writeAndReadFile(FileSystem fs,
String name,
Configuration conf,
long size
) throws IOException {
Path f = null;
try {
f = writeFile(fs, name, conf, size);
readFile(fs, f, name, conf);
} finally {
try {
if (f != null) {
fs.delete(f, true);
}
} catch (IOException ie) {
// IGNORE
}
}
}
private static void printUsage() {
ToolRunner.printGenericCommandUsage(System.err);
System.err.println("Usage: dfsthroughput [#reps]");
System.err.println("Config properties:\n" +
" dfsthroughput.file.size:\tsize of each write/read (10GB)\n" +
" dfsthroughput.buffer.size:\tbuffer size for write/read (4k)\n");
}
@Override
public int run(String[] args) throws IOException {
// silence the minidfs cluster
Log hadoopLog = LogFactory.getLog("org");
if (hadoopLog instanceof Log4JLogger) {
((Log4JLogger) hadoopLog).getLogger().setLevel(Level.WARN);
}
int reps = 1;
if (args.length == 1) {
try {
reps = Integer.parseInt(args[0]);
} catch (NumberFormatException e) {
printUsage();
return -1;
}
} else if (args.length > 1) {
printUsage();
return -1;
}
Configuration conf = getConf();
// the size of the file to write
long SIZE = conf.getLong("dfsthroughput.file.size",
10L * 1024 * 1024 * 1024);
BUFFER_SIZE = conf.getInt("dfsthroughput.buffer.size", 4 * 1024);
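    // Fall back to hadoop.tmp.dir when no mapred.temp.dir is configured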
String localDir = conf.get("mapred.temp.dir");
if (localDir == null) {
localDir = conf.get("hadoop.tmp.dir");
conf.set("mapred.temp.dir", localDir);
}
dir = new LocalDirAllocator("mapred.temp.dir");
System.setProperty("test.build.data", localDir);
System.out.println("Local = " + localDir);
ChecksumFileSystem checkedLocal = FileSystem.getLocal(conf);
FileSystem rawLocal = checkedLocal.getRawFileSystem();
for(int i=0; i < reps; ++i) {
writeAndReadLocalFile("local", conf, SIZE);
writeAndReadFile(rawLocal, "raw", conf, SIZE);
writeAndReadFile(checkedLocal, "checked", conf, SIZE);
}
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf)
.racks(new String[]{"/foo"}).build();
cluster.waitActive();
FileSystem dfs = cluster.getFileSystem();
for(int i=0; i < reps; ++i) {
writeAndReadFile(dfs, "dfs", conf, SIZE);
}
} finally {
if (cluster != null) {
cluster.shutdown();
// clean up minidfs junk
rawLocal.delete(new Path(localDir, "dfs"), true);
}
}
return 0;
}
/**
* @param args arguments
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new HdfsConfiguration(),
new BenchmarkThroughput(), args);
System.exit(res);
}
}
| 7,811 | 31.414938 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.EnumSet;
import java.util.Random;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
 * This class tests the listFiles API of FileContext.
*/
public class TestListFilesInFileContext {
{
((Log4JLogger)FileSystem.LOG).getLogger().setLevel(Level.ALL);
}
static final long seed = 0xDEADBEEFL;
final private static Configuration conf = new Configuration();
private static MiniDFSCluster cluster;
private static FileContext fc;
final private static Path TEST_DIR = new Path("/main_");
final private static int FILE_LEN = 10;
final private static Path FILE1 = new Path(TEST_DIR, "file1");
final private static Path DIR1 = new Path(TEST_DIR, "dir1");
final private static Path FILE2 = new Path(DIR1, "file2");
final private static Path FILE3 = new Path(DIR1, "file3");
@BeforeClass
public static void testSetUp() throws Exception {
cluster = new MiniDFSCluster.Builder(conf).build();
fc = FileContext.getFileContext(cluster.getConfiguration(0));
fc.delete(TEST_DIR, true);
}
private static void writeFile(FileContext fc, Path name, int fileSize)
throws IOException {
    // Create and write a file of the given size, filled with pseudo-random data
FSDataOutputStream stm = fc.create(name, EnumSet.of(CreateFlag.CREATE),
Options.CreateOpts.createParent());
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
stm.close();
}
@AfterClass
public static void testShutdown() throws Exception {
cluster.shutdown();
}
/** Test when input path is a file */
@Test
public void testFile() throws IOException {
fc.mkdir(TEST_DIR, FsPermission.getDefault(), true);
writeFile(fc, FILE1, FILE_LEN);
RemoteIterator<LocatedFileStatus> itor = fc.util().listFiles(
FILE1, true);
LocatedFileStatus stat = itor.next();
assertFalse(itor.hasNext());
assertTrue(stat.isFile());
assertEquals(FILE_LEN, stat.getLen());
assertEquals(fc.makeQualified(FILE1), stat.getPath());
assertEquals(1, stat.getBlockLocations().length);
itor = fc.util().listFiles(FILE1, false);
stat = itor.next();
assertFalse(itor.hasNext());
assertTrue(stat.isFile());
assertEquals(FILE_LEN, stat.getLen());
assertEquals(fc.makeQualified(FILE1), stat.getPath());
assertEquals(1, stat.getBlockLocations().length);
}
@After
public void cleanDir() throws IOException {
fc.delete(TEST_DIR, true);
}
/** Test when input path is a directory */
@Test
public void testDirectory() throws IOException {
fc.mkdir(DIR1, FsPermission.getDefault(), true);
// test empty directory
RemoteIterator<LocatedFileStatus> itor = fc.util().listFiles(
DIR1, true);
assertFalse(itor.hasNext());
itor = fc.util().listFiles(DIR1, false);
assertFalse(itor.hasNext());
// testing directory with 1 file
writeFile(fc, FILE2, FILE_LEN);
itor = fc.util().listFiles(DIR1, true);
LocatedFileStatus stat = itor.next();
assertFalse(itor.hasNext());
assertTrue(stat.isFile());
assertEquals(FILE_LEN, stat.getLen());
assertEquals(fc.makeQualified(FILE2), stat.getPath());
assertEquals(1, stat.getBlockLocations().length);
itor = fc.util().listFiles(DIR1, false);
stat = itor.next();
assertFalse(itor.hasNext());
assertTrue(stat.isFile());
assertEquals(FILE_LEN, stat.getLen());
assertEquals(fc.makeQualified(FILE2), stat.getPath());
assertEquals(1, stat.getBlockLocations().length);
// test more complicated directory
writeFile(fc, FILE1, FILE_LEN);
writeFile(fc, FILE3, FILE_LEN);
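    // the recursive listing is expected to return the files under dir1 (file2,
    // file3) before the top-level file1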
itor = fc.util().listFiles(TEST_DIR, true);
stat = itor.next();
assertTrue(stat.isFile());
assertEquals(fc.makeQualified(FILE2), stat.getPath());
stat = itor.next();
assertTrue(stat.isFile());
assertEquals(fc.makeQualified(FILE3), stat.getPath());
stat = itor.next();
assertTrue(stat.isFile());
assertEquals(fc.makeQualified(FILE1), stat.getPath());
assertFalse(itor.hasNext());
itor = fc.util().listFiles(TEST_DIR, false);
stat = itor.next();
assertTrue(stat.isFile());
assertEquals(fc.makeQualified(FILE1), stat.getPath());
assertFalse(itor.hasNext());
}
  /** Test when the input path has symbolic links as its children */
@Test
public void testSymbolicLinks() throws IOException {
writeFile(fc, FILE1, FILE_LEN);
writeFile(fc, FILE2, FILE_LEN);
writeFile(fc, FILE3, FILE_LEN);
Path dir4 = new Path(TEST_DIR, "dir4");
Path dir5 = new Path(dir4, "dir5");
Path file4 = new Path(dir4, "file4");
fc.createSymlink(DIR1, dir5, true);
fc.createSymlink(FILE1, file4, true);
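    // dir5 links to DIR1 and file4 links to FILE1, so a recursive listing of
    // dir4 resolves through both symlinks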
RemoteIterator<LocatedFileStatus> itor = fc.util().listFiles(dir4, true);
LocatedFileStatus stat = itor.next();
assertTrue(stat.isFile());
assertEquals(fc.makeQualified(FILE2), stat.getPath());
stat = itor.next();
assertTrue(stat.isFile());
assertEquals(fc.makeQualified(FILE3), stat.getPath());
stat = itor.next();
assertTrue(stat.isFile());
assertEquals(fc.makeQualified(FILE1), stat.getPath());
assertFalse(itor.hasNext());
itor = fc.util().listFiles(dir4, false);
stat = itor.next();
assertTrue(stat.isFile());
assertEquals(fc.makeQualified(FILE1), stat.getPath());
assertFalse(itor.hasNext());
}
}
| 7,002 | 33.160976 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.*;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.util.Time;
import org.junit.Test;
/**
* This class tests that the DFS command mkdirs only creates valid
* directories, and generally behaves as expected.
*/
public class TestDFSMkdirs {
private final Configuration conf = new HdfsConfiguration();
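  // Paths with empty, "." or ".." components; the NN should reject these over RPC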
private static final String[] NON_CANONICAL_PATHS = new String[] {
"//test1",
"/test2/..",
"/test2//bar",
"/test2/../test4",
"/test5/."
};
/**
* Tests mkdirs can create a directory that does not exist and will
* not create a subdirectory off a file. Regression test for HADOOP-281.
*/
@Test
public void testDFSMkdirs() throws IOException {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
FileSystem fileSys = cluster.getFileSystem();
try {
// First create a new directory with mkdirs
Path myPath = new Path("/test/mkdirs");
assertTrue(fileSys.mkdirs(myPath));
assertTrue(fileSys.exists(myPath));
assertTrue(fileSys.mkdirs(myPath));
// Second, create a file in that directory.
Path myFile = new Path("/test/mkdirs/myFile");
DFSTestUtil.writeFile(fileSys, myFile, "hello world");
// Third, use mkdir to create a subdirectory off of that file,
// and check that it fails.
Path myIllegalPath = new Path("/test/mkdirs/myFile/subdir");
      boolean exist = true;
try {
fileSys.mkdirs(myIllegalPath);
} catch (IOException e) {
exist = false;
}
assertFalse(exist);
assertFalse(fileSys.exists(myIllegalPath));
fileSys.delete(myFile, true);
} finally {
fileSys.close();
cluster.shutdown();
}
}
/**
* Tests mkdir will not create directory when parent is missing.
*/
@Test
public void testMkdir() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
DistributedFileSystem dfs = cluster.getFileSystem();
try {
// Create a dir in root dir, should succeed
assertTrue(dfs.mkdir(new Path("/mkdir-" + Time.now()),
FsPermission.getDefault()));
// Create a dir when parent dir exists as a file, should fail
IOException expectedException = null;
String filePath = "/mkdir-file-" + Time.now();
DFSTestUtil.writeFile(dfs, new Path(filePath), "hello world");
try {
dfs.mkdir(new Path(filePath + "/mkdir"), FsPermission.getDefault());
} catch (IOException e) {
expectedException = e;
}
assertTrue("Create a directory when parent dir exists as file using"
+ " mkdir() should throw ParentNotDirectoryException ",
expectedException != null
&& expectedException instanceof ParentNotDirectoryException);
// Create a dir in a non-exist directory, should fail
expectedException = null;
try {
dfs.mkdir(new Path("/non-exist/mkdir-" + Time.now()),
FsPermission.getDefault());
} catch (IOException e) {
expectedException = e;
}
assertTrue("Create a directory in a non-exist parent dir using"
+ " mkdir() should throw FileNotFoundException ",
expectedException != null
&& expectedException instanceof FileNotFoundException);
} finally {
dfs.close();
cluster.shutdown();
}
}
/**
* Regression test for HDFS-3626. Creates a file using a non-canonical path
* (i.e. with extra slashes between components) and makes sure that the NN
* rejects it.
*/
@Test
public void testMkdirRpcNonCanonicalPath() throws IOException {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
try {
NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
for (String pathStr : NON_CANONICAL_PATHS) {
try {
nnrpc.mkdirs(pathStr, new FsPermission((short)0755), true);
fail("Did not fail when called with a non-canonicalized path: "
+ pathStr);
} catch (InvalidPathException ipe) {
// expected
}
}
} finally {
cluster.shutdown();
}
}
}
| 5,513 | 34.346154 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.StaticMapping;
import org.apache.hadoop.security.UserGroupInformation;
public class MiniDFSClusterWithNodeGroup extends MiniDFSCluster {
private static String[] NODE_GROUPS = null;
private static final Log LOG = LogFactory.getLog(MiniDFSClusterWithNodeGroup.class);
public MiniDFSClusterWithNodeGroup(Builder builder) throws IOException {
super(builder);
}
public static void setNodeGroups (String[] nodeGroups) {
NODE_GROUPS = nodeGroups;
}
public synchronized void startDataNodes(Configuration conf, int numDataNodes,
StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
String[] racks, String[] nodeGroups, String[] hosts,
long[][] storageCapacities,
long[] simulatedCapacities,
boolean setupHostsFile,
boolean checkDataNodeAddrConfig,
boolean checkDataNodeHostConfig) throws IOException {
assert storageCapacities == null || simulatedCapacities == null;
assert storageTypes == null || storageTypes.length == numDataNodes;
assert storageCapacities == null || storageCapacities.length == numDataNodes;
if (operation == StartupOption.RECOVER) {
return;
}
if (checkDataNodeHostConfig) {
conf.setIfUnset(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
} else {
conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
}
int curDatanodesNum = dataNodes.size();
    // for the minicluster, the default initial delay for block reports is 0
if (conf.get(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
}
// If minicluster's name node is null assume that the conf has been
// set with the right address:port of the name node.
//
if (racks != null && numDataNodes > racks.length ) {
throw new IllegalArgumentException( "The length of racks [" + racks.length
+ "] is less than the number of datanodes [" + numDataNodes + "].");
}
if (nodeGroups != null && numDataNodes > nodeGroups.length ) {
throw new IllegalArgumentException( "The length of nodeGroups [" + nodeGroups.length
+ "] is less than the number of datanodes [" + numDataNodes + "].");
}
if (hosts != null && numDataNodes > hosts.length ) {
throw new IllegalArgumentException( "The length of hosts [" + hosts.length
+ "] is less than the number of datanodes [" + numDataNodes + "].");
}
//Generate some hostnames if required
if (racks != null && hosts == null) {
hosts = new String[numDataNodes];
for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
}
}
if (simulatedCapacities != null
&& numDataNodes > simulatedCapacities.length) {
throw new IllegalArgumentException( "The length of simulatedCapacities ["
+ simulatedCapacities.length
+ "] is less than the number of datanodes [" + numDataNodes + "].");
}
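    // Only the -rollback option is passed through as a command-line argument
    // to the datanodes; for any other startup option they start without args.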
String [] dnArgs = (operation == null ||
operation != StartupOption.ROLLBACK) ?
null : new String[] {operation.getName()};
DataNode[] dns = new DataNode[numDataNodes];
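    // Instantiate each datanode with its own copy of the configuration,
    // registering rack / node-group mappings and simulated storage as we go.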
for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) {
Configuration dnConf = new HdfsConfiguration(conf);
// Set up datanode address
setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
if (manageDfsDirs) {
String dirs = makeDataNodeDirs(i, storageTypes == null ? null : storageTypes[i]);
dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
}
if (simulatedCapacities != null) {
SimulatedFSDataset.setFactory(dnConf);
dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
simulatedCapacities[i-curDatanodesNum]);
}
LOG.info("Starting DataNode " + i + " with "
+ DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": "
+ dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
if (hosts != null) {
dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
LOG.info("Starting DataNode " + i + " with hostname set to: "
+ dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
}
if (racks != null) {
String name = hosts[i - curDatanodesNum];
if (nodeGroups == null) {
LOG.info("Adding node with hostname : " + name + " to rack " +
racks[i-curDatanodesNum]);
StaticMapping.addNodeToRack(name,racks[i-curDatanodesNum]);
} else {
LOG.info("Adding node with hostname : " + name + " to serverGroup " +
nodeGroups[i-curDatanodesNum] + " and rack " +
racks[i-curDatanodesNum]);
StaticMapping.addNodeToRack(name,racks[i-curDatanodesNum] +
nodeGroups[i-curDatanodesNum]);
}
}
Configuration newconf = new HdfsConfiguration(dnConf); // save config
if (hosts != null) {
NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
}
SecureResources secureResources = null;
if (UserGroupInformation.isSecurityEnabled()) {
try {
secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
} catch (Exception ex) {
ex.printStackTrace();
}
}
DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf, secureResources);
if(dn == null)
throw new IOException("Cannot start DataNode in "
+ dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
//since the HDFS does things based on IP:port, we need to add the mapping
//for IP:port to rackId
String ipAddr = dn.getXferAddress().getAddress().getHostAddress();
if (racks != null) {
int port = dn.getXferAddress().getPort();
if (nodeGroups == null) {
LOG.info("Adding node with IP:port : " + ipAddr + ":" + port +
" to rack " + racks[i-curDatanodesNum]);
StaticMapping.addNodeToRack(ipAddr + ":" + port,
racks[i-curDatanodesNum]);
} else {
LOG.info("Adding node with IP:port : " + ipAddr + ":" + port + " to nodeGroup " +
nodeGroups[i-curDatanodesNum] + " and rack " + racks[i-curDatanodesNum]);
StaticMapping.addNodeToRack(ipAddr + ":" + port, racks[i-curDatanodesNum] +
nodeGroups[i-curDatanodesNum]);
}
}
dn.runDatanodeDaemon();
dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources, dn.getIpcPort()));
dns[i - curDatanodesNum] = dn;
}
curDatanodesNum += numDataNodes;
this.numDataNodes += numDataNodes;
waitActive();
if (storageCapacities != null) {
      // The datanodes started in this call are indexed from 0 in dns[] and
      // storageCapacities[]; curDatanodesNum has already been advanced above.
      for (int i = 0; i < numDataNodes; ++i) {
try (FsDatasetSpi.FsVolumeReferences volumes =
dns[i].getFSDataset().getFsVolumeReferences()) {
assert volumes.size() == storagesPerDatanode;
for (int j = 0; j < volumes.size(); ++j) {
FsVolumeImpl volume = (FsVolumeImpl) volumes.get(j);
volume.setCapacityForTesting(storageCapacities[i][j]);
}
}
}
}
}
public synchronized void startDataNodes(Configuration conf, int numDataNodes,
boolean manageDfsDirs, StartupOption operation,
String[] racks, String[] nodeGroups, String[] hosts,
long[] simulatedCapacities,
boolean setupHostsFile) throws IOException {
startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, nodeGroups,
hosts, null, simulatedCapacities, setupHostsFile, false, false);
}
public void startDataNodes(Configuration conf, int numDataNodes,
boolean manageDfsDirs, StartupOption operation,
String[] racks, long[] simulatedCapacities,
String[] nodeGroups) throws IOException {
startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, nodeGroups,
null, simulatedCapacities, false);
}
// This is for initialize from parent class.
@Override
public synchronized void startDataNodes(Configuration conf, int numDataNodes,
StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
String[] racks, String[] hosts,
long[][] storageCapacities,
long[] simulatedCapacities,
boolean setupHostsFile,
boolean checkDataNodeAddrConfig,
boolean checkDataNodeHostConfig,
Configuration[] dnConfOverlays) throws IOException {
startDataNodes(conf, numDataNodes, storageTypes, manageDfsDirs, operation, racks,
NODE_GROUPS, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
checkDataNodeAddrConfig, checkDataNodeHostConfig);
}
}
| 10,631 | 42.57377 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.junit.After;
import org.junit.Test;
/**
* This test ensures the appropriate response (successful or failure) from
* a Datanode when the system is started with differing version combinations.
*/
public class TestDFSStartupVersions {
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestDFSStartupVersions");
private MiniDFSCluster cluster = null;
/**
* Writes an INFO log message containing the parameters.
*/
void log(String label, NodeType nodeType, Integer testCase,
StorageData sd) {
String testCaseLine = "";
if (testCase != null) {
testCaseLine = " testCase="+testCase;
}
LOG.info("============================================================");
LOG.info("***TEST*** " + label + ":"
+ testCaseLine
+ " nodeType="+nodeType
+ " layoutVersion="+sd.storageInfo.getLayoutVersion()
+ " namespaceID="+sd.storageInfo.getNamespaceID()
+ " fsscTime="+sd.storageInfo.getCTime()
+ " clusterID="+sd.storageInfo.getClusterID()
+ " BlockPoolID="+sd.blockPoolId);
}
/**
* Class used for initializing version information for tests
*/
private static class StorageData {
private final StorageInfo storageInfo;
private final String blockPoolId;
StorageData(int layoutVersion, int namespaceId, String clusterId,
long cTime, String bpid) {
storageInfo = new StorageInfo(layoutVersion, namespaceId, clusterId,
cTime, NodeType.DATA_NODE);
blockPoolId = bpid;
}
}
/**
* Initialize the versions array. This array stores all combinations
* of cross product:
* {oldLayoutVersion,currentLayoutVersion,futureLayoutVersion} X
* {currentNamespaceId,incorrectNamespaceId} X
* {pastFsscTime,currentFsscTime,futureFsscTime}
*/
private StorageData[] initializeVersions() throws Exception {
int layoutVersionOld = Storage.LAST_UPGRADABLE_LAYOUT_VERSION;
int layoutVersionCur = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
int layoutVersionNew = Integer.MIN_VALUE;
int namespaceIdCur = UpgradeUtilities.getCurrentNamespaceID(null);
int namespaceIdOld = Integer.MIN_VALUE;
long fsscTimeOld = Long.MIN_VALUE;
long fsscTimeCur = UpgradeUtilities.getCurrentFsscTime(null);
long fsscTimeNew = Long.MAX_VALUE;
String clusterID = "testClusterID";
    String invalidClusterID = "testClusterID-invalid";
String bpid = UpgradeUtilities.getCurrentBlockPoolID(null);
String invalidBpid = "invalidBpid";
return new StorageData[] {
new StorageData(layoutVersionOld, namespaceIdCur, clusterID,
fsscTimeOld, bpid), // 0
new StorageData(layoutVersionOld, namespaceIdCur, clusterID,
fsscTimeCur, bpid), // 1
new StorageData(layoutVersionOld, namespaceIdCur, clusterID,
fsscTimeNew, bpid), // 2
new StorageData(layoutVersionOld, namespaceIdOld, clusterID,
fsscTimeOld, bpid), // 3
new StorageData(layoutVersionOld, namespaceIdOld, clusterID,
fsscTimeCur, bpid), // 4
new StorageData(layoutVersionOld, namespaceIdOld, clusterID,
fsscTimeNew, bpid), // 5
new StorageData(layoutVersionCur, namespaceIdCur, clusterID,
fsscTimeOld, bpid), // 6
new StorageData(layoutVersionCur, namespaceIdCur, clusterID,
fsscTimeCur, bpid), // 7
new StorageData(layoutVersionCur, namespaceIdCur, clusterID,
fsscTimeNew, bpid), // 8
new StorageData(layoutVersionCur, namespaceIdOld, clusterID,
fsscTimeOld, bpid), // 9
new StorageData(layoutVersionCur, namespaceIdOld, clusterID,
fsscTimeCur, bpid), // 10
new StorageData(layoutVersionCur, namespaceIdOld, clusterID,
fsscTimeNew, bpid), // 11
new StorageData(layoutVersionNew, namespaceIdCur, clusterID,
fsscTimeOld, bpid), // 12
new StorageData(layoutVersionNew, namespaceIdCur, clusterID,
fsscTimeCur, bpid), // 13
new StorageData(layoutVersionNew, namespaceIdCur, clusterID,
fsscTimeNew, bpid), // 14
new StorageData(layoutVersionNew, namespaceIdOld, clusterID,
fsscTimeOld, bpid), // 15
new StorageData(layoutVersionNew, namespaceIdOld, clusterID,
fsscTimeCur, bpid), // 16
new StorageData(layoutVersionNew, namespaceIdOld, clusterID,
fsscTimeNew, bpid), // 17
// Test with invalid clusterId
new StorageData(layoutVersionCur, namespaceIdCur, invalidClusterID,
fsscTimeCur, bpid), // 18
// Test with invalid block pool Id
new StorageData(layoutVersionCur, namespaceIdCur, clusterID,
fsscTimeCur, invalidBpid) // 19
};
}
/**
* Determines if the given Namenode version and Datanode version
   * are compatible with each other. Compatibility in this case means
* that the Namenode and Datanode will successfully start up and
* will work together. The rules for compatibility,
* taken from the DFS Upgrade Design, are as follows:
* <pre>
* <ol>
* <li>Check 0: Datanode namespaceID != Namenode namespaceID the startup fails
* </li>
* <li>Check 1: Datanode clusterID != Namenode clusterID the startup fails
* </li>
* <li>Check 2: Datanode blockPoolID != Namenode blockPoolID the startup fails
* </li>
* <li>Check 3: The data-node does regular startup (no matter which options
* it is started with) if
* softwareLV == storedLV AND
* DataNode.FSSCTime == NameNode.FSSCTime
* </li>
* <li>Check 4: The data-node performs an upgrade if it is started without any
* options and
* |softwareLV| > |storedLV| OR
* (softwareLV == storedLV AND
* DataNode.FSSCTime < NameNode.FSSCTime)
* </li>
* <li>NOT TESTED: The data-node rolls back if it is started with
* the -rollback option and
* |softwareLV| >= |previous.storedLV| AND
* DataNode.previous.FSSCTime <= NameNode.FSSCTime
* </li>
* <li>Check 5: In all other cases the startup fails.</li>
* </ol>
* </pre>
*/
boolean isVersionCompatible(StorageData namenodeSd, StorageData datanodeSd) {
final StorageInfo namenodeVer = namenodeSd.storageInfo;
final StorageInfo datanodeVer = datanodeSd.storageInfo;
// check #0
if (namenodeVer.getNamespaceID() != datanodeVer.getNamespaceID()) {
LOG.info("namespaceIDs are not equal: isVersionCompatible=false");
return false;
}
// check #1
if (!namenodeVer.getClusterID().equals(datanodeVer.getClusterID())) {
LOG.info("clusterIDs are not equal: isVersionCompatible=false");
return false;
}
// check #2
if (!namenodeSd.blockPoolId.equals(datanodeSd.blockPoolId)) {
LOG.info("blockPoolIDs are not equal: isVersionCompatible=false");
return false;
}
// check #3
int softwareLV = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
int storedLV = datanodeVer.getLayoutVersion();
if (softwareLV == storedLV &&
datanodeVer.getCTime() == namenodeVer.getCTime())
{
LOG.info("layoutVersions and cTimes are equal: isVersionCompatible=true");
return true;
}
// check #4
long absSoftwareLV = Math.abs((long)softwareLV);
long absStoredLV = Math.abs((long)storedLV);
if (absSoftwareLV > absStoredLV ||
(softwareLV == storedLV &&
datanodeVer.getCTime() < namenodeVer.getCTime()))
{
LOG.info("softwareLayoutVersion is newer OR namenode cTime is newer: isVersionCompatible=true");
return true;
}
// check #5
LOG.info("default case: isVersionCompatible=false");
return false;
}
/**
* This test ensures the appropriate response (successful or failure) from
* a Datanode when the system is started with differing version combinations.
* <pre>
* For each 3-tuple in the cross product
* ({oldLayoutVersion,currentLayoutVersion,futureLayoutVersion},
* {currentNamespaceId,incorrectNamespaceId},
* {pastFsscTime,currentFsscTime,futureFsscTime})
* 1. Startup Namenode with version file containing
* (currentLayoutVersion,currentNamespaceId,currentFsscTime)
* 2. Attempt to startup Datanode with version file containing
* this iterations version 3-tuple
* </pre>
*/
@Test (timeout=300000)
public void testVersions() throws Exception {
UpgradeUtilities.initialize();
Configuration conf = UpgradeUtilities.initializeStorageStateConf(1,
new HdfsConfiguration());
StorageData[] versions = initializeVersions();
UpgradeUtilities.createNameNodeStorageDirs(
conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY), "current");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.startupOption(StartupOption.REGULAR)
.build();
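    // The namenode is started against the storage directories created above,
    // without reformatting, so its version info can be captured below.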
StorageData nameNodeVersion = new StorageData(
HdfsServerConstants.NAMENODE_LAYOUT_VERSION,
UpgradeUtilities.getCurrentNamespaceID(cluster),
UpgradeUtilities.getCurrentClusterID(cluster),
UpgradeUtilities.getCurrentFsscTime(cluster),
UpgradeUtilities.getCurrentBlockPoolID(cluster));
log("NameNode version info", NAME_NODE, null, nameNodeVersion);
String bpid = UpgradeUtilities.getCurrentBlockPoolID(cluster);
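    // For each candidate datanode version, lay down a matching VERSION file in
    // fresh datanode storage and check whether the datanode manages to come
    // up, comparing against the expected compatibility result.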
for (int i = 0; i < versions.length; i++) {
File[] storage = UpgradeUtilities.createDataNodeStorageDirs(
conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY), "current");
log("DataNode version info", DATA_NODE, i, versions[i]);
UpgradeUtilities.createDataNodeVersionFile(storage,
versions[i].storageInfo, bpid, versions[i].blockPoolId);
try {
cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
} catch (Exception ignore) {
// Ignore. The asserts below will check for problems.
// ignore.printStackTrace();
}
assertTrue(cluster.getNameNode() != null);
assertEquals(isVersionCompatible(nameNodeVersion, versions[i]),
cluster.isDataNodeUp());
cluster.shutdownDataNodes();
}
}
@After
public void tearDown() throws Exception {
LOG.info("Shutting down MiniDFSCluster");
if (cluster != null) cluster.shutdown();
}
public static void main(String[] args) throws Exception {
new TestDFSStartupVersions().testVersions();
}
}
| 12,543 | 41.666667 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.junit.Assert;
import org.junit.Test;
import javax.management.*;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
 * The test makes sure that the NameNode detects the presence of blocks that do
 * not have any valid replicas. In addition, it verifies that the HDFS front
 * page displays a warning in such a case.
*/
public class TestMissingBlocksAlert {
private static final Log LOG =
LogFactory.getLog(TestMissingBlocksAlert.class);
@Test
public void testMissingBlocksAlert()
throws IOException, InterruptedException,
MalformedObjectNameException, AttributeNotFoundException,
MBeanException, ReflectionException,
InstanceNotFoundException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new HdfsConfiguration();
//minimize test delay
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 0);
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
int fileLen = 10*1024;
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, fileLen/2);
//start a cluster with single datanode
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
final BlockManager bm = cluster.getNamesystem().getBlockManager();
DistributedFileSystem dfs =
cluster.getFileSystem();
// create a normal file
DFSTestUtil.createFile(dfs, new Path("/testMissingBlocksAlert/file1"),
fileLen, (short)3, 0);
Path corruptFile = new Path("/testMissingBlocks/corruptFile");
DFSTestUtil.createFile(dfs, corruptFile, fileLen, (short)3, 0);
// Corrupt the block
ExtendedBlock block = DFSTestUtil.getFirstBlock(dfs, corruptFile);
assertTrue(cluster.corruptReplica(0, block));
// read the file so that the corrupt block is reported to NN
FSDataInputStream in = dfs.open(corruptFile);
try {
in.readFully(new byte[fileLen]);
} catch (ChecksumException ignored) { // checksum error is expected.
}
in.close();
LOG.info("Waiting for missing blocks count to increase...");
while (dfs.getMissingBlocksCount() <= 0) {
Thread.sleep(100);
}
assertTrue(dfs.getMissingBlocksCount() == 1);
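      // Each file has two blocks (the block size is half the file length), so
      // four blocks exist in total. All of them are under-replicated because
      // only one datanode is running; the corrupted one is also counted as
      // missing.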
assertEquals(4, dfs.getUnderReplicatedBlocksCount());
assertEquals(3, bm.getUnderReplicatedNotMissingBlocks());
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=NameNode,name=NameNodeInfo");
Assert.assertEquals(1, (long)(Long) mbs.getAttribute(mxbeanName,
"NumberOfMissingBlocks"));
// now do the reverse : remove the file expect the number of missing
// blocks to go to zero
dfs.delete(corruptFile, true);
LOG.info("Waiting for missing blocks count to be zero...");
while (dfs.getMissingBlocksCount() > 0) {
Thread.sleep(100);
}
assertEquals(2, dfs.getUnderReplicatedBlocksCount());
assertEquals(2, bm.getUnderReplicatedNotMissingBlocks());
Assert.assertEquals(0, (long)(Long) mbs.getAttribute(mxbeanName,
"NumberOfMissingBlocks"));
Path replOneFile = new Path("/testMissingBlocks/replOneFile");
DFSTestUtil.createFile(dfs, replOneFile, fileLen, (short)1, 0);
ExtendedBlock replOneBlock = DFSTestUtil.getFirstBlock(
dfs, replOneFile);
assertTrue(cluster.corruptReplica(0, replOneBlock));
// read the file so that the corrupt block is reported to NN
in = dfs.open(replOneFile);
try {
in.readFully(new byte[fileLen]);
} catch (ChecksumException ignored) { // checksum error is expected.
}
in.close();
assertEquals(1, dfs.getMissingReplOneBlocksCount());
Assert.assertEquals(1, (long)(Long) mbs.getAttribute(mxbeanName,
"NumberOfMissingBlocksWithReplicationFactorOne"));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 5,507 | 36.216216 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.junit.Test;
public class TestBlockMissingException {
final static Log LOG = LogFactory.getLog("org.apache.hadoop.hdfs.TestBlockMissing");
final static int NUM_DATANODES = 3;
Configuration conf;
MiniDFSCluster dfs = null;
DistributedFileSystem fileSys = null;
/**
   * Test that reading a file whose only block replica has been removed
   * fails with a BlockMissingException.
*/
@Test
public void testBlockMissingException() throws Exception {
LOG.info("Test testBlockMissingException started.");
long blockSize = 1024L;
int numBlocks = 4;
conf = new HdfsConfiguration();
// Set short retry timeouts so this test runs faster
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
try {
dfs = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
dfs.waitActive();
fileSys = dfs.getFileSystem();
Path file1 = new Path("/user/dhruba/raidtest/file1");
createOldFile(fileSys, file1, 1, numBlocks, blockSize);
// extract block locations from File system. Wait till file is closed.
LocatedBlocks locations = null;
locations = fileSys.dfs.getNamenode().getBlockLocations(file1.toString(),
0, numBlocks * blockSize);
// remove block of file
LOG.info("Remove first block of file");
corruptBlock(file1, locations.get(0).getBlock());
// validate that the system throws BlockMissingException
validateFile(fileSys, file1);
} finally {
if (fileSys != null) fileSys.close();
if (dfs != null) dfs.shutdown();
}
LOG.info("Test testBlockMissingException completed.");
}
//
// creates a file and populate it with data.
//
private void createOldFile(FileSystem fileSys, Path name, int repl, int numBlocks, long blocksize)
throws IOException {
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) repl, blocksize);
// fill data into file
final byte[] b = new byte[(int)blocksize];
for (int i = 0; i < numBlocks; i++) {
stm.write(b);
}
stm.close();
}
//
// validates that file encounters BlockMissingException
//
private void validateFile(FileSystem fileSys, Path name)
throws IOException {
FSDataInputStream stm = fileSys.open(name);
final byte[] b = new byte[4192];
int num = 0;
boolean gotException = false;
try {
while (num >= 0) {
num = stm.read(b);
if (num < 0) {
break;
}
}
} catch (BlockMissingException e) {
gotException = true;
}
stm.close();
assertTrue("Expected BlockMissingException ", gotException);
}
//
// Corrupt specified block of file
//
void corruptBlock(Path file, ExtendedBlock blk) {
// Now deliberately remove/truncate data blocks from the file.
File[] blockFiles = dfs.getAllBlockFiles(blk);
for (File f : blockFiles) {
f.delete();
LOG.info("Deleted block " + f);
}
}
}
| 4,427 | 32.044776 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.util.ThreadUtil;
import org.junit.Test;
/**
 * This class tests modification times of files and directories in DFS.
*/
public class TestModTime {
static final long seed = 0xDEADBEEFL;
static final int blockSize = 8192;
static final int fileSize = 16384;
static final int numDatanodes = 6;
Random myrand = new Random();
Path hostsFile;
Path excludeFile;
private void writeFile(FileSystem fileSys, Path name, int repl)
throws IOException {
// create and write a file that contains three blocks of data
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) repl, blockSize);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
stm.close();
}
private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
assertTrue(fileSys.exists(name));
fileSys.delete(name, true);
assertTrue(!fileSys.exists(name));
}
private void printDatanodeReport(DatanodeInfo[] info) {
System.out.println("-------------------------------------------------");
for (int i = 0; i < info.length; i++) {
System.out.println(info[i].getDatanodeReport());
System.out.println();
}
}
/**
* Tests modification time in DFS.
*/
@Test
public void testModTime() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDatanodes).build();
cluster.waitActive();
InetSocketAddress addr = new InetSocketAddress("localhost",
cluster.getNameNodePort());
DFSClient client = new DFSClient(addr, conf);
DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
assertEquals("Number of Datanodes ", numDatanodes, info.length);
FileSystem fileSys = cluster.getFileSystem();
int replicas = numDatanodes - 1;
assertTrue(fileSys instanceof DistributedFileSystem);
try {
//
// create file and record ctime and mtime of test file
//
System.out.println("Creating testdir1 and testdir1/test1.dat.");
Path dir1 = new Path("testdir1");
Path file1 = new Path(dir1, "test1.dat");
writeFile(fileSys, file1, replicas);
FileStatus stat = fileSys.getFileStatus(file1);
long mtime1 = stat.getModificationTime();
assertTrue(mtime1 != 0);
//
// record dir times
//
stat = fileSys.getFileStatus(dir1);
long mdir1 = stat.getModificationTime();
//
// create second test file
//
System.out.println("Creating testdir1/test2.dat.");
Path file2 = new Path(dir1, "test2.dat");
writeFile(fileSys, file2, replicas);
stat = fileSys.getFileStatus(file2);
//
      // verify that the modification time of the directory has been
      // updated (or at least has not moved backwards) now that a new
      // file has been created in it.
//
stat = fileSys.getFileStatus(dir1);
assertTrue(stat.getModificationTime() >= mdir1);
mdir1 = stat.getModificationTime();
//
// create another directory
//
Path dir2 = fileSys.makeQualified(new Path("testdir2/"));
System.out.println("Creating testdir2 " + dir2);
assertTrue(fileSys.mkdirs(dir2));
stat = fileSys.getFileStatus(dir2);
long mdir2 = stat.getModificationTime();
//
// rename file1 from testdir into testdir2
//
Path newfile = new Path(dir2, "testnew.dat");
System.out.println("Moving " + file1 + " to " + newfile);
fileSys.rename(file1, newfile);
//
// verify that modification time of file1 did not change.
//
stat = fileSys.getFileStatus(newfile);
assertTrue(stat.getModificationTime() == mtime1);
//
// verify that modification time of testdir1 and testdir2
// were changed.
//
stat = fileSys.getFileStatus(dir1);
assertTrue(stat.getModificationTime() != mdir1);
mdir1 = stat.getModificationTime();
stat = fileSys.getFileStatus(dir2);
assertTrue(stat.getModificationTime() != mdir2);
mdir2 = stat.getModificationTime();
//
// delete newfile
//
System.out.println("Deleting testdir2/testnew.dat.");
assertTrue(fileSys.delete(newfile, true));
//
// verify that modification time of testdir1 has not changed.
//
stat = fileSys.getFileStatus(dir1);
assertTrue(stat.getModificationTime() == mdir1);
//
// verify that modification time of testdir2 has changed.
//
stat = fileSys.getFileStatus(dir2);
assertTrue(stat.getModificationTime() != mdir2);
mdir2 = stat.getModificationTime();
cleanupFile(fileSys, file2);
cleanupFile(fileSys, dir1);
cleanupFile(fileSys, dir2);
} catch (IOException e) {
info = client.datanodeReport(DatanodeReportType.ALL);
printDatanodeReport(info);
throw e;
} finally {
fileSys.close();
cluster.shutdown();
}
}
/**
* Regression test for HDFS-3864 - NN does not update internal file mtime for
* OP_CLOSE when reading from the edit log.
*/
@Test
public void testModTimePersistsAfterRestart() throws IOException {
final long sleepTime = 10; // 10 milliseconds
MiniDFSCluster cluster = null;
FileSystem fs = null;
Configuration conf = new HdfsConfiguration();
try {
cluster = new MiniDFSCluster.Builder(conf).build();
fs = cluster.getFileSystem();
Path testPath = new Path("/test");
// Open a file, and get its initial modification time.
OutputStream out = fs.create(testPath);
long initialModTime = fs.getFileStatus(testPath).getModificationTime();
assertTrue(initialModTime > 0);
// Wait and then close the file. Ensure that the mod time goes up.
ThreadUtil.sleepAtLeastIgnoreInterrupts(sleepTime);
out.close();
long modTimeAfterClose = fs.getFileStatus(testPath).getModificationTime();
assertTrue(modTimeAfterClose >= initialModTime + sleepTime);
// Restart the NN, and make sure that the later mod time is still used.
cluster.restartNameNode();
long modTimeAfterRestart = fs.getFileStatus(testPath).getModificationTime();
assertEquals(modTimeAfterClose, modTimeAfterRestart);
} finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
public static void main(String[] args) throws Exception {
new TestModTime().testModTime();
}
}
| 8,109 | 33.219409 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import java.util.ArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.metrics2.util.Quantile;
import org.apache.hadoop.metrics2.util.SampleQuantiles;
import org.apache.hadoop.util.StopWatch;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Test;
/**
* This class tests hflushing concurrently from many threads.
*/
public class TestMultiThreadedHflush {
static final int blockSize = 1024*1024;
private static final int NUM_THREADS = 10;
private static final int WRITE_SIZE = 517;
private static final int NUM_WRITES_PER_THREAD = 1000;
private byte[] toWrite = null;
private final SampleQuantiles quantiles = new SampleQuantiles(
new Quantile[] {
new Quantile(0.50, 0.050),
new Quantile(0.75, 0.025), new Quantile(0.90, 0.010),
new Quantile(0.95, 0.005), new Quantile(0.99, 0.001) });
/*
* creates a file but does not close it
*/
private FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
throws IOException {
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) repl, blockSize);
return stm;
}
private void initBuffer(int size) {
long seed = AppendTestUtil.nextLong();
toWrite = AppendTestUtil.randomBytes(seed, size);
}
private class WriterThread extends Thread {
private final FSDataOutputStream stm;
private final AtomicReference<Throwable> thrown;
private final int numWrites;
private final CountDownLatch countdown;
public WriterThread(FSDataOutputStream stm,
AtomicReference<Throwable> thrown,
CountDownLatch countdown, int numWrites) {
this.stm = stm;
this.thrown = thrown;
this.numWrites = numWrites;
this.countdown = countdown;
}
@Override
public void run() {
try {
countdown.await();
for (int i = 0; i < numWrites && thrown.get() == null; i++) {
doAWrite();
}
} catch (Throwable t) {
thrown.compareAndSet(null, t);
}
}
private void doAWrite() throws IOException {
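      // Time a single write + hflush round trip and feed the latency into
      // the quantile estimator.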
StopWatch sw = new StopWatch().start();
stm.write(toWrite);
stm.hflush();
long micros = sw.now(TimeUnit.MICROSECONDS);
quantiles.insert(micros);
}
}
/**
* Test case where a bunch of threads are both appending and flushing.
* They all finish before the file is closed.
*/
@Test
public void testMultipleHflushersRepl1() throws Exception {
doTestMultipleHflushers(1);
}
@Test
public void testMultipleHflushersRepl3() throws Exception {
doTestMultipleHflushers(3);
}
private void doTestMultipleHflushers(int repl) throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(repl)
.build();
FileSystem fs = cluster.getFileSystem();
Path p = new Path("/multiple-hflushers.dat");
try {
doMultithreadedWrites(conf, p, NUM_THREADS, WRITE_SIZE,
NUM_WRITES_PER_THREAD, repl);
System.out.println("Latency quantiles (in microseconds):\n" +
quantiles);
} finally {
fs.close();
cluster.shutdown();
}
}
/**
* Test case where a bunch of threads are continuously calling hflush() while another
* thread appends some data and then closes the file.
*
* The hflushing threads should eventually catch an IOException stating that the stream
* was closed -- and not an NPE or anything like that.
*/
@Test
public void testHflushWhileClosing() throws Throwable {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
Path p = new Path("/hflush-and-close.dat");
final FSDataOutputStream stm = createFile(fs, p, 1);
ArrayList<Thread> flushers = new ArrayList<Thread>();
final AtomicReference<Throwable> thrown = new AtomicReference<Throwable>();
try {
for (int i = 0; i < 10; i++) {
Thread flusher = new Thread() {
@Override
public void run() {
try {
while (true) {
try {
stm.hflush();
} catch (ClosedChannelException ioe) {
// Expected exception caught. Ignoring.
return;
}
}
} catch (Throwable t) {
thrown.set(t);
}
}
};
flusher.start();
flushers.add(flusher);
}
// Write some data
for (int i = 0; i < 10000; i++) {
stm.write(1);
}
// Close it while the flushing threads are still flushing
stm.close();
// Wait for the flushers to all die.
for (Thread t : flushers) {
t.join();
}
// They should have all gotten the expected exception, not anything
// else.
if (thrown.get() != null) {
throw thrown.get();
}
} finally {
fs.close();
cluster.shutdown();
}
}
public void doMultithreadedWrites(
Configuration conf, Path p, int numThreads, int bufferSize, int numWrites,
int replication) throws Exception {
initBuffer(bufferSize);
// create a new file.
FileSystem fs = p.getFileSystem(conf);
FSDataOutputStream stm = createFile(fs, p, replication);
System.out.println("Created file simpleFlush.dat");
// There have been a couple issues with flushing empty buffers, so do
// some empty flushes first.
stm.hflush();
stm.hflush();
stm.write(1);
stm.hflush();
stm.hflush();
CountDownLatch countdown = new CountDownLatch(1);
ArrayList<Thread> threads = new ArrayList<Thread>();
AtomicReference<Throwable> thrown = new AtomicReference<Throwable>();
for (int i = 0; i < numThreads; i++) {
Thread t = new WriterThread(stm, thrown, countdown, numWrites);
threads.add(t);
t.start();
}
// Start all the threads at the same time for maximum raciness!
countdown.countDown();
for (Thread t : threads) {
t.join();
}
if (thrown.get() != null) {
throw new RuntimeException("Deferred", thrown.get());
}
stm.close();
System.out.println("Closed file.");
}
public static void main(String args[]) throws Exception {
System.exit(ToolRunner.run(new CLIBenchmark(), args));
}
private static class CLIBenchmark extends Configured implements Tool {
public int run(String args[]) throws Exception {
if (args.length != 1) {
System.err.println(
"usage: " + TestMultiThreadedHflush.class.getSimpleName() +
" <path to test file> ");
System.err.println(
"Configurations settable by -D options:\n" +
" num.threads [default 10] - how many threads to run\n" +
" write.size [default 511] - bytes per write\n" +
" num.writes [default 50000] - how many writes to perform");
System.exit(1);
}
TestMultiThreadedHflush test = new TestMultiThreadedHflush();
Configuration conf = getConf();
Path p = new Path(args[0]);
int numThreads = conf.getInt("num.threads", 10);
int writeSize = conf.getInt("write.size", 511);
int numWrites = conf.getInt("num.writes", 50000);
int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
DFSConfigKeys.DFS_REPLICATION_DEFAULT);
StopWatch sw = new StopWatch().start();
test.doMultithreadedWrites(conf, p, numThreads, writeSize, numWrites,
replication);
sw.stop();
System.out.println("Finished in " + sw.now(TimeUnit.MILLISECONDS) + "ms");
System.out.println("Latency quantiles (in microseconds):\n" +
test.quantiles);
return 0;
}
}
}
| 9,290 | 31.037931 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.net.UnknownHostException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.CreateEditsLog;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.util.Time;
/**
*
*
* This program starts a mini cluster of data nodes
 *  (i.e. a mini cluster without the name node), all within one address space.
* It is assumed that the name node has been started separately prior
* to running this program.
*
* A use case of this is to run a real name node with a large number of
* simulated data nodes for say a NN benchmark.
*
 * Synopsis:
 *   DataNodeCluster -n numDataNodes [-racks numRacks] -simulated
* [-inject startingBlockId numBlocksPerDN]
* [ -r replicationForInjectedBlocks ]
* [-d editsLogDirectory]
*
* if -simulated is specified then simulated data nodes are started.
* if -inject is specified then blocks are injected in each datanode;
* -inject option is valid only for simulated data nodes.
*
 * See Also @link #CreateEditsLog for creating an edits log file to
 *  inject a matching set of blocks into a name node.
 *  Typical use of -inject is to inject blocks into a set of datanodes
 *   using this DataNodeCluster command
* and then to inject the same blocks into a name node using the
* CreateEditsLog command.
*
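 * A hypothetical example invocation (class and option names are taken from
 * this file, but all values are illustrative only):
 *
 *   hadoop org.apache.hadoop.hdfs.DataNodeCluster -n 100 -racks 4 \
 *       -simulated -bpid BP-1-127.0.0.1-1 -inject 1 10
 *
 * would start 100 simulated datanodes spread over 4 racks and inject
 * 10 blocks per datanode, starting at block id 1.
 *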
*/
public class DataNodeCluster {
static final String DATANODE_DIRS = "/tmp/DataNodeCluster";
static String dataNodeDirs = DATANODE_DIRS;
static final String USAGE =
"Usage: datanodecluster " +
" -n <numDataNodes> " +
" -bpid <bpid>" +
" [-racks <numRacks>] " +
" [-simulated [<simulatedCapacityPerDn>]] " +
" [-inject startingBlockId numBlocksPerDN]" +
" [-r replicationFactorForInjectedBlocks]" +
" [-d dataNodeDirs]\n" +
" [-checkDataNodeAddrConfig]\n" +
" Default datanode direcory is " + DATANODE_DIRS + "\n" +
" Default replication factor for injected blocks is 1\n" +
" Defaul rack is used if -racks is not specified\n" +
" Data nodes are simulated if -simulated OR conf file specifies simulated\n" +
" -checkDataNodeAddrConfig tells DataNodeConf to use data node addresses from conf file, if it is set. If not set, use .localhost'.";
static void printUsageExit() {
System.out.println(USAGE);
System.exit(-1);
}
static void printUsageExit(String err) {
System.out.println(err);
printUsageExit();
}
public static void main(String[] args) throws InterruptedException {
int numDataNodes = 0;
int numRacks = 0;
boolean inject = false;
long startingBlockId = 1;
int numBlocksPerDNtoInject = 0;
int replication = 1;
boolean checkDataNodeAddrConfig = false;
long simulatedCapacityPerDn = SimulatedFSDataset.DEFAULT_CAPACITY;
String bpid = null;
Configuration conf = new HdfsConfiguration();
for (int i = 0; i < args.length; i++) { // parse command line
if (args[i].equals("-n")) {
if (++i >= args.length || args[i].startsWith("-")) {
printUsageExit("missing number of nodes");
}
numDataNodes = Integer.parseInt(args[i]);
} else if (args[i].equals("-racks")) {
if (++i >= args.length || args[i].startsWith("-")) {
printUsageExit("Missing number of racks");
}
numRacks = Integer.parseInt(args[i]);
} else if (args[i].equals("-r")) {
if (++i >= args.length || args[i].startsWith("-")) {
printUsageExit("Missing replication factor");
}
replication = Integer.parseInt(args[i]);
} else if (args[i].equals("-d")) {
if (++i >= args.length || args[i].startsWith("-")) {
printUsageExit("Missing datanode dirs parameter");
}
dataNodeDirs = args[i];
} else if (args[i].equals("-simulated")) {
SimulatedFSDataset.setFactory(conf);
if ((i+1) < args.length && !args[i+1].startsWith("-")) {
simulatedCapacityPerDn = Long.parseLong(args[++i]);
}
} else if (args[i].equals("-bpid")) {
if (++i >= args.length || args[i].startsWith("-")) {
printUsageExit("Missing blockpoolid parameter");
}
bpid = args[i];
} else if (args[i].equals("-inject")) {
if (!FsDatasetSpi.Factory.getFactory(conf).isSimulated()) {
System.out.print("-inject is valid only for simulated");
printUsageExit();
}
inject = true;
if (++i >= args.length || args[i].startsWith("-")) {
printUsageExit(
"Missing starting block and number of blocks per DN to inject");
}
startingBlockId = Integer.parseInt(args[i]);
if (++i >= args.length || args[i].startsWith("-")) {
printUsageExit("Missing number of blocks to inject");
}
numBlocksPerDNtoInject = Integer.parseInt(args[i]);
} else if (args[i].equals("-checkDataNodeAddrConfig")) {
checkDataNodeAddrConfig = true;
} else {
printUsageExit();
}
}
if (numDataNodes <= 0 || replication <= 0 ) {
printUsageExit("numDataNodes and replication have to be greater than zero");
}
if (replication > numDataNodes) {
printUsageExit("Replication must be less than or equal to numDataNodes");
}
if (bpid == null) {
printUsageExit("BlockPoolId must be provided");
}
String nameNodeAdr = FileSystem.getDefaultUri(conf).getAuthority();
if (nameNodeAdr == null) {
System.out.println("No name node address and port in config");
System.exit(-1);
}
boolean simulated = FsDatasetSpi.Factory.getFactory(conf).isSimulated();
System.out.println("Starting " + numDataNodes +
(simulated ? " Simulated " : " ") +
" Data Nodes that will connect to Name Node at " + nameNodeAdr);
System.setProperty("test.build.data", dataNodeDirs);
long simulatedCapacities[] = new long[numDataNodes];
for (int i = 0; i < numDataNodes; ++i) {
simulatedCapacities[i] = simulatedCapacityPerDn;
}
MiniDFSCluster mc = new MiniDFSCluster();
try {
mc.formatDataNodeDirs();
} catch (IOException e) {
System.out.println("Error formating data node dirs:" + e);
}
String[] rack4DataNode = null;
if (numRacks > 0) {
System.out.println("Using " + numRacks + " racks: ");
String rackPrefix = getUniqueRackPrefix();
rack4DataNode = new String[numDataNodes];
for (int i = 0; i < numDataNodes; ++i ) {
//rack4DataNode[i] = racks[i%numRacks];
rack4DataNode[i] = rackPrefix + "-" + i%numRacks;
System.out.println("Data Node " + i + " using " + rack4DataNode[i]);
}
}
try {
mc.startDataNodes(conf, numDataNodes, true, StartupOption.REGULAR,
rack4DataNode, null, simulatedCapacities, false, checkDataNodeAddrConfig);
Thread.sleep(10*1000); // Give the DN some time to connect to NN and init storage directories.
if (inject) {
long blockSize = 10;
System.out.println("Injecting " + numBlocksPerDNtoInject +
" blocks in each DN starting at blockId " + startingBlockId +
" with blocksize of " + blockSize);
Block[] blocks = new Block[numBlocksPerDNtoInject];
long blkid = startingBlockId;
for (int i_dn = 0; i_dn < numDataNodes; ++i_dn) {
for (int i = 0; i < blocks.length; ++i) {
blocks[i] = new Block(blkid++, blockSize,
CreateEditsLog.BLOCK_GENERATION_STAMP);
}
for (int i = 1; i <= replication; ++i) {
// inject blocks for dn_i into dn_i and replica in dn_i's neighbors
mc.injectBlocks((i_dn + i- 1)% numDataNodes, Arrays.asList(blocks), bpid);
System.out.println("Injecting blocks of dn " + i_dn + " into dn" +
((i_dn + i- 1)% numDataNodes));
}
}
System.out.println("Created blocks from Bids "
+ startingBlockId + " to " + (blkid -1));
}
} catch (IOException e) {
System.out.println("Error creating data node:" + e);
}
}
/*
   * There is a high probability that the rack id generated here will
   * not conflict with those of other data node clusters.
   * Not perfect, but mostly unique rack ids are good enough.
*/
static private String getUniqueRackPrefix() {
String ip = "unknownIP";
try {
ip = DNS.getDefaultIP("default");
} catch (UnknownHostException ignored) {
System.out.println("Could not find ip address of \"default\" inteface.");
}
int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE);
return "/Rack-" + rand + "-"+ ip + "-" + Time.now();
}
}
| 10,005 | 38.085938 | 142 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Supplier;
import com.google.common.collect.Lists;
/**
* Tests to verify safe mode correctness.
*/
public class TestSafeMode {
public static final Log LOG = LogFactory.getLog(TestSafeMode.class);
private static final Path TEST_PATH = new Path("/test");
private static final int BLOCK_SIZE = 1024;
private static final String NEWLINE = System.getProperty("line.separator");
Configuration conf;
MiniDFSCluster cluster;
FileSystem fs;
DistributedFileSystem dfs;
private static final String NN_METRICS = "NameNodeActivity";
@Before
public void startUp() throws IOException {
conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
dfs = (DistributedFileSystem)fs;
}
@After
public void tearDown() throws IOException {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
/**
* This test verifies that if SafeMode is manually entered, name-node does not
* come out of safe mode even after the startup safe mode conditions are met.
* <ol>
* <li>Start cluster with 1 data-node.</li>
* <li>Create 2 files with replication 1.</li>
* <li>Re-start cluster with 0 data-nodes.
* Name-node should stay in automatic safe-mode.</li>
* <li>Enter safe mode manually.</li>
* <li>Start the data-node.</li>
* <li>Wait longer than <tt>dfs.namenode.safemode.extension</tt> and
* verify that the name-node is still in safe mode.</li>
* </ol>
*
* @throws IOException
*/
@Test
public void testManualSafeMode() throws IOException {
fs = cluster.getFileSystem();
Path file1 = new Path("/tmp/testManualSafeMode/file1");
Path file2 = new Path("/tmp/testManualSafeMode/file2");
// create two files with one block each.
DFSTestUtil.createFile(fs, file1, 1000, (short)1, 0);
DFSTestUtil.createFile(fs, file2, 1000, (short)1, 0);
fs.close();
cluster.shutdown();
// now bring up just the NameNode.
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
cluster.waitActive();
dfs = cluster.getFileSystem();
assertTrue("No datanode is started. Should be in SafeMode",
dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
// manually set safemode.
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
// now bring up the datanode and wait for it to be active.
cluster.startDataNodes(conf, 1, true, null, null);
cluster.waitActive();
// wait longer than dfs.namenode.safemode.extension
try {
Thread.sleep(2000);
} catch (InterruptedException ignored) {}
assertTrue("should still be in SafeMode",
dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
assertFalse("should not be in SafeMode",
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
}
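  /**
   * Illustrative sketch only, not invoked by any test in this class: the
   * public DistributedFileSystem calls used above to query and toggle safe
   * mode. SAFEMODE_GET reports the current state without changing it; a
   * manually entered safe mode sticks until SAFEMODE_LEAVE is issued.
   */
  private static boolean toggleSafeModeSketch(DistributedFileSystem dfs)
      throws IOException {
    // Query only -- returns true while the namenode is in safe mode.
    boolean wasInSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET);
    // Enter manually; startup thresholds no longer apply after this.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    // Leave explicitly; returns the state after the transition (false).
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    return wasInSafeMode;
  }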
/**
* Test that, if there are no blocks in the filesystem,
* the NameNode doesn't enter the "safemode extension" period.
*/
@Test(timeout=45000)
public void testNoExtensionIfNoBlocks() throws IOException {
cluster.getConfiguration(0).setInt(
DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 60000);
cluster.restartNameNode();
// Even though we have safemode extension set high, we should immediately
// exit safemode on startup because there are no blocks in the namespace.
String status = cluster.getNameNode().getNamesystem().getSafemode();
assertEquals("", status);
}
/**
* Test that the NN initializes its under-replicated blocks queue
* before it is ready to exit safemode (HDFS-1476)
*/
@Test(timeout=45000)
public void testInitializeReplQueuesEarly() throws Exception {
LOG.info("Starting testInitializeReplQueuesEarly");
// Spray the blocks around the cluster when we add DNs instead of
// concentrating all blocks on the first node.
BlockManagerTestUtil.setWritingPrefersLocalNode(
cluster.getNamesystem().getBlockManager(), false);
cluster.startDataNodes(conf, 2, true, StartupOption.REGULAR, null);
cluster.waitActive();
LOG.info("Creating files");
DFSTestUtil.createFile(fs, TEST_PATH, 15*BLOCK_SIZE, (short)1, 1L);
LOG.info("Stopping all DataNodes");
List<DataNodeProperties> dnprops = Lists.newLinkedList();
dnprops.add(cluster.stopDataNode(0));
dnprops.add(cluster.stopDataNode(0));
dnprops.add(cluster.stopDataNode(0));
cluster.getConfiguration(0).setFloat(
DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY, 1f/15f);
LOG.info("Restarting NameNode");
cluster.restartNameNode();
final NameNode nn = cluster.getNameNode();
String status = nn.getNamesystem().getSafemode();
assertEquals("Safe mode is ON. The reported blocks 0 needs additional " +
"14 blocks to reach the threshold 0.9990 of total blocks 15." + NEWLINE +
"The number of live datanodes 0 has reached the minimum number 0. " +
"Safe mode will be turned off automatically once the thresholds " +
"have been reached.", status);
assertFalse("Mis-replicated block queues should not be initialized " +
"until threshold is crossed",
NameNodeAdapter.safeModeInitializedReplQueues(nn));
LOG.info("Restarting one DataNode");
cluster.restartDataNode(dnprops.remove(0));
// Wait for block reports from all attached storages of
// the restarted DN to come in.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return getLongCounter("StorageBlockReportOps", getMetrics(NN_METRICS)) ==
cluster.getStoragesPerDatanode();
}
}, 10, 10000);
final int safe = NameNodeAdapter.getSafeModeSafeBlocks(nn);
assertTrue("Expected first block report to make some blocks safe.", safe > 0);
assertTrue("Did not expect first block report to make all blocks safe.", safe < 15);
assertTrue(NameNodeAdapter.safeModeInitializedReplQueues(nn));
// Ensure that UnderReplicatedBlocks goes up to 15 - safe. Misreplicated
// blocks are processed asynchronously so this may take a few seconds.
// Failure here will manifest as a test timeout.
BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
long underReplicatedBlocks = nn.getNamesystem().getUnderReplicatedBlocks();
while (underReplicatedBlocks != (15 - safe)) {
LOG.info("UnderReplicatedBlocks expected=" + (15 - safe) +
", actual=" + underReplicatedBlocks);
Thread.sleep(100);
BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
underReplicatedBlocks = nn.getNamesystem().getUnderReplicatedBlocks();
}
cluster.restartDataNodes();
}
/**
* Test that, when under-replicated blocks are processed at the end of
* safe-mode, blocks currently under construction are not considered
* under-construction or missing. Regression test for HDFS-2822.
*/
@Test
public void testRbwBlocksNotConsideredUnderReplicated() throws IOException {
List<FSDataOutputStream> stms = Lists.newArrayList();
try {
// Create some junk blocks so that the NN doesn't just immediately
// exit safemode on restart.
DFSTestUtil.createFile(fs, new Path("/junk-blocks"),
BLOCK_SIZE*4, (short)1, 1L);
// Create several files which are left open. It's important to
// create several here, because otherwise the first iteration of the
// replication monitor will pull them off the replication queue and
// hide this bug from the test!
for (int i = 0; i < 10; i++) {
FSDataOutputStream stm = fs.create(
new Path("/append-" + i), true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
stms.add(stm);
stm.write(1);
stm.hflush();
}
cluster.restartNameNode();
FSNamesystem ns = cluster.getNameNode(0).getNamesystem();
BlockManagerTestUtil.updateState(ns.getBlockManager());
assertEquals(0, ns.getPendingReplicationBlocks());
assertEquals(0, ns.getCorruptReplicaBlocks());
assertEquals(0, ns.getMissingBlocksCount());
} finally {
for (FSDataOutputStream stm : stms) {
IOUtils.closeStream(stm);
}
cluster.shutdown();
}
}
public interface FSRun {
public abstract void run(FileSystem fs) throws IOException;
}
/**
* Assert that the given function fails to run due to a safe
* mode exception.
*/
public void runFsFun(String msg, FSRun f) {
try {
f.run(fs);
fail(msg);
} catch (RemoteException re) {
assertEquals(SafeModeException.class.getName(), re.getClassName());
GenericTestUtils.assertExceptionContains("Name node is in safe mode", re);
} catch (SafeModeException ignored) {
} catch (IOException ioe) {
fail(msg + " " + StringUtils.stringifyException(ioe));
}
}
/**
* Run various fs operations while the NN is in safe mode,
* assert that they are either allowed or fail as expected.
*/
@Test
public void testOperationsWhileInSafeMode() throws IOException,
InterruptedException {
final Path file1 = new Path("/file1");
assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
DFSTestUtil.createFile(fs, file1, 1024, (short)1, 0);
assertTrue("Could not enter SM",
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER));
runFsFun("Set quota while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
((DistributedFileSystem)fs).setQuota(file1, 1, 1);
}});
runFsFun("Set perm while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.setPermission(file1, FsPermission.getDefault());
}});
runFsFun("Set owner while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.setOwner(file1, "user", "group");
}});
runFsFun("Set repl while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.setReplication(file1, (short)1);
}});
runFsFun("Append file while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
DFSTestUtil.appendFile(fs, file1, "new bytes");
}});
runFsFun("Truncate file while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.truncate(file1, 0);
}});
runFsFun("Delete file while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.delete(file1, false);
}});
runFsFun("Rename file while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.rename(file1, new Path("file2"));
}});
runFsFun("Set time while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.setTimes(file1, 0, 0);
}});
runFsFun("modifyAclEntries while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.modifyAclEntries(file1, Lists.<AclEntry>newArrayList());
}});
runFsFun("removeAclEntries while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.removeAclEntries(file1, Lists.<AclEntry>newArrayList());
}});
runFsFun("removeDefaultAcl while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.removeDefaultAcl(file1);
}});
runFsFun("removeAcl while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.removeAcl(file1);
}});
runFsFun("setAcl while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.setAcl(file1, Lists.<AclEntry>newArrayList());
}});
runFsFun("setXAttr while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.setXAttr(file1, "user.a1", null);
}});
runFsFun("removeXAttr while in SM", new FSRun() {
@Override
public void run(FileSystem fs) throws IOException {
fs.removeXAttr(file1, "user.a1");
}});
try {
DFSTestUtil.readFile(fs, file1);
} catch (IOException ioe) {
fail("Set times failed while in SM");
}
try {
fs.getAclStatus(file1);
} catch (IOException ioe) {
fail("getAclStatus failed while in SM");
}
// Test access
UserGroupInformation ugiX = UserGroupInformation.createRemoteUser("userX");
FileSystem myfs = ugiX.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws IOException {
return FileSystem.get(conf);
}
});
myfs.access(file1, FsAction.READ);
try {
myfs.access(file1, FsAction.WRITE);
fail("The access call should have failed.");
} catch (AccessControlException e) {
// expected
}
assertFalse("Could not leave SM",
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
}
/**
* Verify that the NameNode stays in safemode when dfs.safemode.datanode.min
* is set to a number greater than the number of live datanodes.
*/
@Test
public void testDatanodeThreshold() throws IOException {
cluster.shutdown();
Configuration conf = cluster.getConfiguration(0);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
cluster.restartNameNode();
fs = cluster.getFileSystem();
String tipMsg = cluster.getNamesystem().getSafemode();
assertTrue("Safemode tip message doesn't look right: " + tipMsg,
tipMsg.contains("The number of live datanodes 0 needs an additional " +
"1 live datanodes to reach the minimum number 1." +
NEWLINE + "Safe mode will be turned off automatically"));
// Start a datanode
cluster.startDataNodes(conf, 1, true, null, null);
// Wait long enough for safemode check to refire
try {
Thread.sleep(1000);
} catch (InterruptedException ignored) {}
// We now should be out of safe mode.
assertEquals("", cluster.getNamesystem().getSafemode());
}
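  /**
   * A minimal configuration sketch, not used by the tests above, collecting
   * the safe-mode knobs this class exercises in one place. The threshold-pct
   * key is an assumption added for completeness; the tests themselves only
   * tune the extension and minimum-datanodes keys.
   */
  private static Configuration safeModeConfSketch() {
    Configuration c = new HdfsConfiguration();
    // How long (ms) to stay in safe mode after thresholds are first met.
    c.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
    // Minimum number of live datanodes required before leaving safe mode.
    c.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
    // Fraction of blocks that must be reported before leaving safe mode.
    c.setFloat(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 0.999f);
    return c;
  }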
  /**
   * Tests some utility methods that surround the SafeMode's state.
   * @throws IOException when there's an issue connecting to the test DFS.
   */
  @Test
  public void testSafeModeUtils() throws IOException {
dfs = cluster.getFileSystem();
// Enter safemode.
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
assertTrue("State was expected to be in safemode.", dfs.isInSafeMode());
// Exit safemode.
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
assertFalse("State was expected to be out of safemode.", dfs.isInSafeMode());
}
@Test
public void testSafeModeWhenZeroBlockLocations() throws IOException {
try {
Path file1 = new Path("/tmp/testManualSafeMode/file1");
Path file2 = new Path("/tmp/testManualSafeMode/file2");
System.out.println("Created file1 and file2.");
// create two files with one block each.
DFSTestUtil.createFile(fs, file1, 1000, (short)1, 0);
DFSTestUtil.createFile(fs, file2, 2000, (short)1, 0);
checkGetBlockLocationsWorks(fs, file1);
NameNode namenode = cluster.getNameNode();
// manually set safemode.
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
assertTrue("should still be in SafeMode", namenode.isInSafeMode());
// getBlock locations should still work since block locations exists
checkGetBlockLocationsWorks(fs, file1);
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
assertFalse("should not be in SafeMode", namenode.isInSafeMode());
// Now 2nd part of the tests where there aren't block locations
cluster.shutdownDataNodes();
cluster.shutdownNameNode(0);
// now bring up just the NameNode.
cluster.restartNameNode();
cluster.waitActive();
System.out.println("Restarted cluster with just the NameNode");
namenode = cluster.getNameNode();
assertTrue("No datanode is started. Should be in SafeMode",
namenode.isInSafeMode());
FileStatus stat = fs.getFileStatus(file1);
try {
fs.getFileBlockLocations(stat, 0, 1000);
assertTrue("Should have got safemode exception", false);
} catch (SafeModeException e) {
// as expected
} catch (RemoteException re) {
        if (!re.getClassName().equals(SafeModeException.class.getName())) {
          fail("Should have got safemode exception");
        }
}
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
assertFalse("Should not be in safemode", namenode.isInSafeMode());
checkGetBlockLocationsWorks(fs, file1);
} finally {
if(fs != null) fs.close();
if(cluster!= null) cluster.shutdown();
}
}
void checkGetBlockLocationsWorks(FileSystem fs, Path fileName) throws IOException {
FileStatus stat = fs.getFileStatus(fileName);
try {
fs.getFileBlockLocations(stat, 0, 1000);
    } catch (SafeModeException e) {
      fail("Should not have got safemode exception");
    } catch (RemoteException re) {
      fail("Should not have got safemode exception");
    }
}
}
| 20,559 | 35.261023 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
import static org.junit.Assert.*;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.util.StringUtils;
import org.junit.After;
import org.junit.Test;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
/**
* This test ensures the appropriate response (successful or failure) from
* the system when the system is rolled back under various storage state and
* version conditions.
*/
public class TestDFSRollback {
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestDFSRollback");
private Configuration conf;
private int testCounter = 0;
private MiniDFSCluster cluster = null;
/**
* Writes an INFO log message containing the parameters.
*/
void log(String label, int numDirs) {
LOG.info("============================================================");
LOG.info("***TEST " + (testCounter++) + "*** "
+ label + ":"
+ " numDirs="+numDirs);
}
/**
* Verify that the new current directory is the old previous.
* It is assumed that the server has recovered and rolled back.
*/
void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
List<File> curDirs = Lists.newArrayList();
for (String baseDir : baseDirs) {
File curDir = new File(baseDir, "current");
curDirs.add(curDir);
switch (nodeType) {
case NAME_NODE:
FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
break;
case DATA_NODE:
assertEquals(
UpgradeUtilities.checksumContents(nodeType, curDir, false),
UpgradeUtilities.checksumMasterDataNodeContents());
break;
}
}
FSImageTestUtil.assertParallelFilesAreIdentical(
curDirs, Collections.<String>emptySet());
for (int i = 0; i < baseDirs.length; i++) {
assertFalse(new File(baseDirs[i],"previous").isDirectory());
}
}
/**
* Attempts to start a NameNode with the given operation. Starting
* the NameNode should throw an exception.
*/
void startNameNodeShouldFail(String searchString) {
try {
NameNode.doRollback(conf, false);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.build(); // should fail
throw new AssertionError("NameNode should have failed to start");
} catch (Exception expected) {
if (!expected.getMessage().contains(searchString)) {
fail("Expected substring '" + searchString + "' in exception " +
"but got: " + StringUtils.stringifyException(expected));
}
// expected
}
}
/**
* Attempts to start a DataNode with the given operation. Starting
* the given block pool should fail.
* @param operation startup option
* @param bpid block pool Id that should fail to start
* @throws IOException
*/
void startBlockPoolShouldFail(StartupOption operation, String bpid)
throws IOException {
cluster.startDataNodes(conf, 1, false, operation, null); // should fail
assertFalse("Block pool " + bpid + " should have failed to start",
cluster.getDataNodes().get(0).isBPServiceAlive(bpid));
}
/**
* This test attempts to rollback the NameNode and DataNode under
* a number of valid and invalid conditions.
*/
@Test
public void testRollback() throws Exception {
File[] baseDirs;
UpgradeUtilities.initialize();
StorageInfo storageInfo = null;
for (int numDirs = 1; numDirs <= 2; numDirs++) {
conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
log("Normal NameNode rollback", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
NameNode.doRollback(conf, false);
checkResult(NAME_NODE, nameNodeDirs);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("Normal DataNode rollback", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
NameNode.doRollback(conf, false);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.dnStartupOption(StartupOption.ROLLBACK)
.build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
checkResult(DATA_NODE, dataNodeDirs);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("Normal BlockPool rollback", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
NameNode.doRollback(conf, false);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.dnStartupOption(StartupOption.ROLLBACK)
.build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "current",
UpgradeUtilities.getCurrentBlockPoolID(cluster));
// Create a previous snapshot for the blockpool
UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "previous",
UpgradeUtilities.getCurrentBlockPoolID(cluster));
// Put newer layout version in current.
storageInfo = new StorageInfo(
HdfsServerConstants.DATANODE_LAYOUT_VERSION - 1,
UpgradeUtilities.getCurrentNamespaceID(cluster),
UpgradeUtilities.getCurrentClusterID(cluster),
UpgradeUtilities.getCurrentFsscTime(cluster),
NodeType.DATA_NODE);
// Overwrite VERSION file in the current directory of
// volume directories and block pool slice directories
      // with a layout version from the future.
File[] dataCurrentDirs = new File[dataNodeDirs.length];
for (int i=0; i<dataNodeDirs.length; i++) {
dataCurrentDirs[i] = new File((new Path(dataNodeDirs[i]
+ "/current")).toString());
}
UpgradeUtilities.createDataNodeVersionFile(
dataCurrentDirs,
storageInfo,
UpgradeUtilities.getCurrentBlockPoolID(cluster));
cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
assertTrue(cluster.isDataNodeUp());
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("NameNode rollback without existing previous dir", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
startNameNodeShouldFail(
"None of the storage directories contain previous fs state");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("DataNode rollback without existing previous dir", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.startupOption(StartupOption.UPGRADE)
.build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("DataNode rollback with future stored layout version in previous", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
NameNode.doRollback(conf, false);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.dnStartupOption(StartupOption.ROLLBACK)
.build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
storageInfo = new StorageInfo(Integer.MIN_VALUE,
UpgradeUtilities.getCurrentNamespaceID(cluster),
UpgradeUtilities.getCurrentClusterID(cluster),
UpgradeUtilities.getCurrentFsscTime(cluster),
NodeType.DATA_NODE);
UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
UpgradeUtilities.getCurrentBlockPoolID(cluster));
startBlockPoolShouldFail(StartupOption.ROLLBACK,
cluster.getNamesystem().getBlockPoolId());
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("DataNode rollback with newer fsscTime in previous", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
NameNode.doRollback(conf, false);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.dnStartupOption(StartupOption.ROLLBACK)
.build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
storageInfo = new StorageInfo(HdfsServerConstants.DATANODE_LAYOUT_VERSION,
UpgradeUtilities.getCurrentNamespaceID(cluster),
UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE,
NodeType.DATA_NODE);
UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
UpgradeUtilities.getCurrentBlockPoolID(cluster));
startBlockPoolShouldFail(StartupOption.ROLLBACK,
cluster.getNamesystem().getBlockPoolId());
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("NameNode rollback with no edits file", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
deleteMatchingFiles(baseDirs, "edits.*");
startNameNodeShouldFail("Gap in transactions");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode rollback with no image file", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
deleteMatchingFiles(baseDirs, "fsimage_.*");
startNameNodeShouldFail("No valid image files found");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode rollback with corrupt version file", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
for (File f : baseDirs) {
UpgradeUtilities.corruptFile(
new File(f,"VERSION"),
"layoutVersion".getBytes(Charsets.UTF_8),
"xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
}
startNameNodeShouldFail("file VERSION has layoutVersion missing");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode rollback with old layout version in previous", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
storageInfo = new StorageInfo(1,
UpgradeUtilities.getCurrentNamespaceID(null),
UpgradeUtilities.getCurrentClusterID(null),
UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);
UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs,
storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
startNameNodeShouldFail("Cannot rollback to storage version 1 using this version");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
} // end numDir loop
}
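  /**
   * Illustrative sketch only, never called: the core sequence behind the
   * "Normal NameNode rollback" case above. Assumes the supplied directories
   * already hold matching "current" and "previous" states.
   */
  private void normalNameNodeRollbackSketch(String[] nameNodeDirs)
      throws Exception {
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
    // Rollback promotes "previous" back to "current" ...
    NameNode.doRollback(conf, false);
    // ... so the new "current" must match what "previous" contained.
    checkResult(NAME_NODE, nameNodeDirs);
  }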
private void deleteMatchingFiles(File[] baseDirs, String regex) {
for (File baseDir : baseDirs) {
for (File f : baseDir.listFiles()) {
if (f.getName().matches(regex)) {
f.delete();
}
}
}
}
@After
public void tearDown() throws Exception {
LOG.info("Shutting down MiniDFSCluster");
if (cluster != null) cluster.shutdown();
}
public static void main(String[] args) throws Exception {
new TestDFSRollback().testRollback();
}
}
| 16,264 | 44.306407 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestHdfsAdmin {
private static final Path TEST_PATH = new Path("/test");
private final Configuration conf = new Configuration();
private MiniDFSCluster cluster;
@Before
public void setUpCluster() throws IOException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
}
@After
public void shutDownCluster() {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Test that we can set and clear quotas via {@link HdfsAdmin}.
*/
@Test
public void testHdfsAdminSetQuota() throws Exception {
HdfsAdmin dfsAdmin = new HdfsAdmin(
FileSystem.getDefaultUri(conf), conf);
FileSystem fs = null;
try {
fs = FileSystem.get(conf);
assertTrue(fs.mkdirs(TEST_PATH));
assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
dfsAdmin.setSpaceQuota(TEST_PATH, 10);
assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());
dfsAdmin.setQuota(TEST_PATH, 10);
assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());
dfsAdmin.clearSpaceQuota(TEST_PATH);
assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
dfsAdmin.clearQuota(TEST_PATH);
assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
} finally {
if (fs != null) {
fs.close();
}
}
}
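  /**
   * A minimal sketch, not used by the test above, of applying both quota
   * types through HdfsAdmin. The -1 read back from getContentSummary()
   * means "no quota set"; the method name and arguments are hypothetical.
   */
  private void applyQuotasSketch(Path dir, long nsQuota, long spaceQuota)
      throws IOException {
    HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    // Namespace quota: limit on the number of names (files + directories).
    admin.setQuota(dir, nsQuota);
    // Space quota: limit on raw bytes consumed, replication included.
    admin.setSpaceQuota(dir, spaceQuota);
  }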
/**
* Make sure that a non-HDFS URI throws a helpful error.
*/
@Test(expected = IllegalArgumentException.class)
public void testHdfsAdminWithBadUri() throws IOException, URISyntaxException {
new HdfsAdmin(new URI("file:///bad-scheme"), conf);
}
}
| 3,271 | 32.731959 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.junit.Test;
public class TestDFSRename {
static int countLease(MiniDFSCluster cluster) {
return NameNodeAdapter.getLeaseManager(cluster.getNamesystem()).countLease();
}
final Path dir = new Path("/test/rename/");
void list(FileSystem fs, String name) throws IOException {
FileSystem.LOG.info("\n\n" + name);
for(FileStatus s : fs.listStatus(dir)) {
FileSystem.LOG.info("" + s.getPath());
}
}
static void createFile(FileSystem fs, Path f) throws IOException {
DataOutputStream a_out = fs.create(f);
a_out.writeBytes("something");
a_out.close();
}
@Test
public void testRename() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
FileSystem fs = cluster.getFileSystem();
assertTrue(fs.mkdirs(dir));
{ //test lease
Path a = new Path(dir, "a");
Path aa = new Path(dir, "aa");
Path b = new Path(dir, "b");
createFile(fs, a);
//should not have any lease
assertEquals(0, countLease(cluster));
DataOutputStream aa_out = fs.create(aa);
aa_out.writeBytes("something");
//should have 1 lease
assertEquals(1, countLease(cluster));
list(fs, "rename0");
fs.rename(a, b);
list(fs, "rename1");
aa_out.writeBytes(" more");
aa_out.close();
list(fs, "rename2");
//should not have any lease
assertEquals(0, countLease(cluster));
}
{ // test non-existent destination
Path dstPath = new Path("/c/d");
assertFalse(fs.exists(dstPath));
assertFalse(fs.rename(dir, dstPath));
}
{ // dst cannot be a file or directory under src
// test rename /a/b/foo to /a/b/c
Path src = new Path("/a/b");
Path dst = new Path("/a/b/c");
createFile(fs, new Path(src, "foo"));
// dst cannot be a file under src
assertFalse(fs.rename(src, dst));
// dst cannot be a directory under src
assertFalse(fs.rename(src.getParent(), dst.getParent()));
}
{ // dst can start with src, if it is not a directory or file under src
// test rename /test /testfile
Path src = new Path("/testPrefix");
Path dst = new Path("/testPrefixfile");
createFile(fs, src);
assertTrue(fs.rename(src, dst));
}
      { // dst should not be the same as src; test rename /a/b/c to /a/b/c
Path src = new Path("/a/b/c");
createFile(fs, src);
assertTrue(fs.rename(src, src));
assertFalse(fs.rename(new Path("/a/b"), new Path("/a/b/")));
assertTrue(fs.rename(src, new Path("/a/b/c/")));
}
fs.delete(dir, true);
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
/**
* Check the blocks of dst file are cleaned after rename with overwrite
* Restart NN to check the rename successfully
*/
@Test(timeout = 120000)
public void testRenameWithOverwrite() throws Exception {
final short replFactor = 2;
final long blockSize = 512;
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
numDataNodes(replFactor).build();
DistributedFileSystem dfs = cluster.getFileSystem();
try {
long fileLen = blockSize*3;
String src = "/foo/src";
String dst = "/foo/dst";
Path srcPath = new Path(src);
Path dstPath = new Path(dst);
DFSTestUtil.createFile(dfs, srcPath, fileLen, replFactor, 1);
DFSTestUtil.createFile(dfs, dstPath, fileLen, replFactor, 1);
LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(
cluster.getNameNode(), dst, 0, fileLen);
BlockManager bm = NameNodeAdapter.getNamesystem(cluster.getNameNode()).
getBlockManager();
assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().
getLocalBlock()) != null);
dfs.rename(srcPath, dstPath, Rename.OVERWRITE);
assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().
getLocalBlock()) == null);
// Restart NN and check the rename successfully
cluster.restartNameNodes();
assertFalse(dfs.exists(srcPath));
assertTrue(dfs.exists(dstPath));
} finally {
if (dfs != null) {
dfs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
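  /**
   * Minimal sketch, not used above: Rename.OVERWRITE lets the destination
   * already exist, whereas the two-argument rename would refuse it. The
   * paths and method name here are hypothetical.
   */
  private static void renameOverwriteSketch(DistributedFileSystem dfs)
      throws IOException {
    dfs.rename(new Path("/foo/src"), new Path("/foo/dst"), Rename.OVERWRITE);
  }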
}
| 6,017 | 32.620112 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.inotify.MissingEventsException;
import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.util.ExitUtil;
import org.junit.Assert;
import org.junit.Test;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URISyntaxException;
import java.util.EnumSet;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
public class TestDFSInotifyEventInputStream {
private static final int BLOCK_SIZE = 1024;
private static final Log LOG = LogFactory.getLog(
TestDFSInotifyEventInputStream.class);
private static EventBatch waitForNextEvents(DFSInotifyEventInputStream eis)
throws IOException, MissingEventsException {
EventBatch batch = null;
while ((batch = eis.poll()) == null);
return batch;
}
private static long checkTxid(EventBatch batch, long prevTxid){
Assert.assertTrue("Previous txid " + prevTxid + " was not less than " +
"new txid " + batch.getTxid(), prevTxid < batch.getTxid());
return batch.getTxid();
}
/**
* If this test fails, check whether the newly added op should map to an
* inotify event, and if so, establish the mapping in
* {@link org.apache.hadoop.hdfs.server.namenode.InotifyFSEditLogOpTranslator}
* and update testBasic() to include the new op.
*/
@Test
public void testOpcodeCount() {
Assert.assertEquals(50, FSEditLogOpCodes.values().length);
}
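  /**
   * Illustrative sketch only, not called by the tests below: the typical
   * shape of an inotify consumer loop. The handling of each event type here
   * is hypothetical; testBasic() below checks every translated op in detail.
   */
  private static void consumeEventsSketch(DFSInotifyEventInputStream eis)
      throws IOException, MissingEventsException {
    EventBatch batch;
    // poll() returns null once the client has caught up with the edit log.
    while ((batch = eis.poll()) != null) {
      for (Event e : batch.getEvents()) {
        if (e.getEventType() == Event.EventType.CREATE) {
          LOG.info("created " + ((Event.CreateEvent) e).getPath());
        } else if (e.getEventType() == Event.EventType.UNLINK) {
          LOG.info("deleted " + ((Event.UnlinkEvent) e).getPath());
        }
      }
    }
  }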
/**
* Tests all FsEditLogOps that are converted to inotify events.
*/
@Test(timeout = 120000)
@SuppressWarnings("deprecation")
public void testBasic() throws IOException, URISyntaxException,
InterruptedException, MissingEventsException {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
// so that we can get an atime change
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1);
MiniQJMHACluster.Builder builder = new MiniQJMHACluster.Builder(conf);
builder.getDfsBuilder().numDataNodes(2);
MiniQJMHACluster cluster = builder.build();
try {
cluster.getDfsCluster().waitActive();
cluster.getDfsCluster().transitionToActive(0);
DFSClient client = new DFSClient(cluster.getDfsCluster().getNameNode(0)
.getNameNodeAddress(), conf);
FileSystem fs = cluster.getDfsCluster().getFileSystem(0);
DFSTestUtil.createFile(fs, new Path("/file"), BLOCK_SIZE, (short) 1, 0L);
DFSTestUtil.createFile(fs, new Path("/file3"), BLOCK_SIZE, (short) 1, 0L);
DFSTestUtil.createFile(fs, new Path("/file5"), BLOCK_SIZE, (short) 1, 0L);
DFSTestUtil.createFile(fs, new Path("/truncate_file"),
BLOCK_SIZE * 2, (short) 1, 0L);
DFSInotifyEventInputStream eis = client.getInotifyEventStream();
client.rename("/file", "/file4", null); // RenameOp -> RenameEvent
client.rename("/file4", "/file2"); // RenameOldOp -> RenameEvent
// DeleteOp, AddOp -> UnlinkEvent, CreateEvent
OutputStream os = client.create("/file2", true, (short) 2, BLOCK_SIZE);
os.write(new byte[BLOCK_SIZE]);
os.close(); // CloseOp -> CloseEvent
// AddOp -> AppendEvent
os = client.append("/file2", BLOCK_SIZE, EnumSet.of(CreateFlag.APPEND),
null, null);
os.write(new byte[BLOCK_SIZE]);
os.close(); // CloseOp -> CloseEvent
Thread.sleep(10); // so that the atime will get updated on the next line
client.open("/file2").read(new byte[1]); // TimesOp -> MetadataUpdateEvent
// SetReplicationOp -> MetadataUpdateEvent
client.setReplication("/file2", (short) 1);
// ConcatDeleteOp -> AppendEvent, UnlinkEvent, CloseEvent
client.concat("/file2", new String[]{"/file3"});
client.delete("/file2", false); // DeleteOp -> UnlinkEvent
client.mkdirs("/dir", null, false); // MkdirOp -> CreateEvent
// SetPermissionsOp -> MetadataUpdateEvent
client.setPermission("/dir", FsPermission.valueOf("-rw-rw-rw-"));
// SetOwnerOp -> MetadataUpdateEvent
client.setOwner("/dir", "username", "groupname");
client.createSymlink("/dir", "/dir2", false); // SymlinkOp -> CreateEvent
client.setXAttr("/file5", "user.field", "value".getBytes(), EnumSet.of(
XAttrSetFlag.CREATE)); // SetXAttrOp -> MetadataUpdateEvent
// RemoveXAttrOp -> MetadataUpdateEvent
client.removeXAttr("/file5", "user.field");
// SetAclOp -> MetadataUpdateEvent
client.setAcl("/file5", AclEntry.parseAclSpec(
"user::rwx,user:foo:rw-,group::r--,other::---", true));
client.removeAcl("/file5"); // SetAclOp -> MetadataUpdateEvent
client.rename("/file5", "/dir"); // RenameOldOp -> RenameEvent
//TruncateOp -> TruncateEvent
client.truncate("/truncate_file", BLOCK_SIZE);
EventBatch batch = null;
// RenameOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
long txid = batch.getTxid();
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
Event.RenameEvent re = (Event.RenameEvent) batch.getEvents()[0];
Assert.assertEquals("/file4", re.getDstPath());
Assert.assertEquals("/file", re.getSrcPath());
Assert.assertTrue(re.getTimestamp() > 0);
LOG.info(re.toString());
Assert.assertTrue(re.toString().startsWith("RenameEvent [srcPath="));
long eventsBehind = eis.getTxidsBehindEstimate();
// RenameOldOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
Event.RenameEvent re2 = (Event.RenameEvent) batch.getEvents()[0];
Assert.assertTrue(re2.getDstPath().equals("/file2"));
Assert.assertTrue(re2.getSrcPath().equals("/file4"));
Assert.assertTrue(re2.getTimestamp() > 0);
LOG.info(re2.toString());
// AddOp with overwrite
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
Event.CreateEvent ce = (Event.CreateEvent) batch.getEvents()[0];
Assert.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
Assert.assertTrue(ce.getPath().equals("/file2"));
Assert.assertTrue(ce.getCtime() > 0);
Assert.assertTrue(ce.getReplication() > 0);
Assert.assertTrue(ce.getSymlinkTarget() == null);
Assert.assertTrue(ce.getOverwrite());
Assert.assertEquals(BLOCK_SIZE, ce.getDefaultBlockSize());
LOG.info(ce.toString());
Assert.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
// CloseOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
Event.CloseEvent ce2 = (Event.CloseEvent) batch.getEvents()[0];
Assert.assertTrue(ce2.getPath().equals("/file2"));
Assert.assertTrue(ce2.getFileSize() > 0);
Assert.assertTrue(ce2.getTimestamp() > 0);
LOG.info(ce2.toString());
Assert.assertTrue(ce2.toString().startsWith("CloseEvent [path="));
// AppendOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
Event.AppendEvent append2 = (Event.AppendEvent)batch.getEvents()[0];
Assert.assertEquals("/file2", append2.getPath());
Assert.assertFalse(append2.toNewBlock());
LOG.info(append2.toString());
Assert.assertTrue(append2.toString().startsWith("AppendEvent [path="));
// CloseOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
Assert.assertTrue(((Event.CloseEvent) batch.getEvents()[0]).getPath().equals("/file2"));
// TimesOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue = (Event.MetadataUpdateEvent) batch.getEvents()[0];
Assert.assertTrue(mue.getPath().equals("/file2"));
Assert.assertTrue(mue.getMetadataType() ==
Event.MetadataUpdateEvent.MetadataType.TIMES);
LOG.info(mue.toString());
Assert.assertTrue(mue.toString().startsWith("MetadataUpdateEvent [path="));
// SetReplicationOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue2 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
Assert.assertTrue(mue2.getPath().equals("/file2"));
Assert.assertTrue(mue2.getMetadataType() ==
Event.MetadataUpdateEvent.MetadataType.REPLICATION);
Assert.assertTrue(mue2.getReplication() == 1);
LOG.info(mue2.toString());
// ConcatDeleteOp
batch = waitForNextEvents(eis);
Assert.assertEquals(3, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
Assert.assertTrue(((Event.AppendEvent) batch.getEvents()[0]).getPath().equals("/file2"));
Assert.assertTrue(batch.getEvents()[1].getEventType() == Event.EventType.UNLINK);
Event.UnlinkEvent ue2 = (Event.UnlinkEvent) batch.getEvents()[1];
Assert.assertTrue(ue2.getPath().equals("/file3"));
Assert.assertTrue(ue2.getTimestamp() > 0);
LOG.info(ue2.toString());
Assert.assertTrue(ue2.toString().startsWith("UnlinkEvent [path="));
Assert.assertTrue(batch.getEvents()[2].getEventType() == Event.EventType.CLOSE);
Event.CloseEvent ce3 = (Event.CloseEvent) batch.getEvents()[2];
Assert.assertTrue(ce3.getPath().equals("/file2"));
Assert.assertTrue(ce3.getTimestamp() > 0);
// DeleteOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.UNLINK);
Event.UnlinkEvent ue = (Event.UnlinkEvent) batch.getEvents()[0];
Assert.assertTrue(ue.getPath().equals("/file2"));
Assert.assertTrue(ue.getTimestamp() > 0);
LOG.info(ue.toString());
// MkdirOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
Event.CreateEvent ce4 = (Event.CreateEvent) batch.getEvents()[0];
Assert.assertTrue(ce4.getiNodeType() ==
Event.CreateEvent.INodeType.DIRECTORY);
Assert.assertTrue(ce4.getPath().equals("/dir"));
Assert.assertTrue(ce4.getCtime() > 0);
Assert.assertTrue(ce4.getReplication() == 0);
Assert.assertTrue(ce4.getSymlinkTarget() == null);
LOG.info(ce4.toString());
// SetPermissionsOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue3 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
Assert.assertTrue(mue3.getPath().equals("/dir"));
Assert.assertTrue(mue3.getMetadataType() ==
Event.MetadataUpdateEvent.MetadataType.PERMS);
Assert.assertTrue(mue3.getPerms().toString().contains("rw-rw-rw-"));
LOG.info(mue3.toString());
// SetOwnerOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue4 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
Assert.assertTrue(mue4.getPath().equals("/dir"));
Assert.assertTrue(mue4.getMetadataType() ==
Event.MetadataUpdateEvent.MetadataType.OWNER);
Assert.assertTrue(mue4.getOwnerName().equals("username"));
Assert.assertTrue(mue4.getGroupName().equals("groupname"));
LOG.info(mue4.toString());
// SymlinkOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
Event.CreateEvent ce5 = (Event.CreateEvent) batch.getEvents()[0];
Assert.assertTrue(ce5.getiNodeType() ==
Event.CreateEvent.INodeType.SYMLINK);
Assert.assertTrue(ce5.getPath().equals("/dir2"));
Assert.assertTrue(ce5.getCtime() > 0);
Assert.assertTrue(ce5.getReplication() == 0);
Assert.assertTrue(ce5.getSymlinkTarget().equals("/dir"));
LOG.info(ce5.toString());
// SetXAttrOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue5 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
Assert.assertTrue(mue5.getPath().equals("/file5"));
Assert.assertTrue(mue5.getMetadataType() ==
Event.MetadataUpdateEvent.MetadataType.XATTRS);
Assert.assertTrue(mue5.getxAttrs().size() == 1);
Assert.assertTrue(mue5.getxAttrs().get(0).getName().contains("field"));
Assert.assertTrue(!mue5.isxAttrsRemoved());
LOG.info(mue5.toString());
// RemoveXAttrOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue6 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
Assert.assertTrue(mue6.getPath().equals("/file5"));
Assert.assertTrue(mue6.getMetadataType() ==
Event.MetadataUpdateEvent.MetadataType.XATTRS);
Assert.assertTrue(mue6.getxAttrs().size() == 1);
Assert.assertTrue(mue6.getxAttrs().get(0).getName().contains("field"));
Assert.assertTrue(mue6.isxAttrsRemoved());
LOG.info(mue6.toString());
// SetAclOp (1)
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue7 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
Assert.assertTrue(mue7.getPath().equals("/file5"));
Assert.assertTrue(mue7.getMetadataType() ==
Event.MetadataUpdateEvent.MetadataType.ACLS);
Assert.assertTrue(mue7.getAcls().contains(
AclEntry.parseAclEntry("user::rwx", true)));
LOG.info(mue7.toString());
// SetAclOp (2)
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
Event.MetadataUpdateEvent mue8 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
Assert.assertTrue(mue8.getPath().equals("/file5"));
Assert.assertTrue(mue8.getMetadataType() ==
Event.MetadataUpdateEvent.MetadataType.ACLS);
Assert.assertTrue(mue8.getAcls() == null);
LOG.info(mue8.toString());
// RenameOp (2)
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
Event.RenameEvent re3 = (Event.RenameEvent) batch.getEvents()[0];
Assert.assertTrue(re3.getDstPath().equals("/dir/file5"));
Assert.assertTrue(re3.getSrcPath().equals("/file5"));
Assert.assertTrue(re3.getTimestamp() > 0);
LOG.info(re3.toString());
// TruncateOp
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
txid = checkTxid(batch, txid);
      Assert.assertTrue(batch.getEvents()[0].getEventType() ==
          Event.EventType.TRUNCATE);
Event.TruncateEvent et = ((Event.TruncateEvent) batch.getEvents()[0]);
Assert.assertTrue(et.getPath().equals("/truncate_file"));
Assert.assertTrue(et.getFileSize() == BLOCK_SIZE);
Assert.assertTrue(et.getTimestamp() > 0);
LOG.info(et.toString());
Assert.assertTrue(et.toString().startsWith("TruncateEvent [path="));
// Returns null when there are no further events
Assert.assertTrue(eis.poll() == null);
// make sure the estimate hasn't changed since the above assertion
// tells us that we are fully caught up to the current namesystem state
// and we should not have been behind at all when eventsBehind was set
// either, since there were few enough events that they should have all
// been read to the client during the first poll() call
Assert.assertTrue(eis.getTxidsBehindEstimate() == eventsBehind);
} finally {
cluster.shutdown();
}
}
@Test(timeout = 120000)
public void testNNFailover() throws IOException, URISyntaxException,
MissingEventsException {
Configuration conf = new HdfsConfiguration();
MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build();
try {
cluster.getDfsCluster().waitActive();
cluster.getDfsCluster().transitionToActive(0);
DFSClient client = ((DistributedFileSystem) HATestUtil.configureFailoverFs
(cluster.getDfsCluster(), conf)).dfs;
DFSInotifyEventInputStream eis = client.getInotifyEventStream();
for (int i = 0; i < 10; i++) {
client.mkdirs("/dir" + i, null, false);
}
cluster.getDfsCluster().shutdownNameNode(0);
cluster.getDfsCluster().transitionToActive(1);
EventBatch batch = null;
// we can read all of the edits logged by the old active from the new
// active
for (int i = 0; i < 10; i++) {
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
Assert.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
i));
}
Assert.assertTrue(eis.poll() == null);
} finally {
cluster.shutdown();
}
}
@Test(timeout = 120000)
public void testTwoActiveNNs() throws IOException, MissingEventsException {
Configuration conf = new HdfsConfiguration();
MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build();
try {
cluster.getDfsCluster().waitActive();
cluster.getDfsCluster().transitionToActive(0);
DFSClient client0 = new DFSClient(cluster.getDfsCluster().getNameNode(0)
.getNameNodeAddress(), conf);
DFSClient client1 = new DFSClient(cluster.getDfsCluster().getNameNode(1)
.getNameNodeAddress(), conf);
DFSInotifyEventInputStream eis = client0.getInotifyEventStream();
for (int i = 0; i < 10; i++) {
client0.mkdirs("/dir" + i, null, false);
}
cluster.getDfsCluster().transitionToActive(1);
for (int i = 10; i < 20; i++) {
client1.mkdirs("/dir" + i, null, false);
}
// make sure that the old active can't read any further than the edits
// it logged itself (it has no idea whether the in-progress edits from
// the other writer have actually been committed)
EventBatch batch = null;
for (int i = 0; i < 10; i++) {
batch = waitForNextEvents(eis);
Assert.assertEquals(1, batch.getEvents().length);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
Assert.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
i));
}
Assert.assertTrue(eis.poll() == null);
} finally {
try {
cluster.shutdown();
} catch (ExitUtil.ExitException e) {
// expected because the old active will be unable to flush the
// end-of-segment op since it is fenced
}
}
}
@Test(timeout = 120000)
public void testReadEventsWithTimeout() throws IOException,
InterruptedException, MissingEventsException {
Configuration conf = new HdfsConfiguration();
MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build();
try {
cluster.getDfsCluster().waitActive();
cluster.getDfsCluster().transitionToActive(0);
final DFSClient client = new DFSClient(cluster.getDfsCluster()
.getNameNode(0).getNameNodeAddress(), conf);
DFSInotifyEventInputStream eis = client.getInotifyEventStream();
ScheduledExecutorService ex = Executors
.newSingleThreadScheduledExecutor();
ex.schedule(new Runnable() {
@Override
public void run() {
try {
client.mkdirs("/dir", null, false);
} catch (IOException e) {
// test will fail
LOG.error("Unable to create /dir", e);
}
}
}, 1, TimeUnit.SECONDS);
// a very generous wait period -- the edit will definitely have been
// processed by the time this is up
EventBatch batch = eis.poll(5, TimeUnit.SECONDS);
Assert.assertNotNull(batch);
Assert.assertEquals(1, batch.getEvents().length);
Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
Assert.assertEquals("/dir", ((Event.CreateEvent) batch.getEvents()[0]).getPath());
} finally {
cluster.shutdown();
}
}
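// For reference, a minimal consumer of the inotify stream looks roughly
// like the sketch below (same API calls as the tests above; error handling
// and InterruptedException are omitted):
//
//   DFSInotifyEventInputStream eis = client.getInotifyEventStream();
//   EventBatch batch;
//   while ((batch = eis.poll(1, TimeUnit.SECONDS)) != null) {
//     for (Event e : batch.getEvents()) {
//       LOG.info("txid " + batch.getTxid() + ": " + e.getEventType());
//     }
//   }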
}
| 23,925 | 44.40038 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestGetFileChecksum {
private static final int BLOCKSIZE = 1024;
private static final short REPLICATION = 3;
private Configuration conf;
private MiniDFSCluster cluster;
private DistributedFileSystem dfs;
@Before
public void setUp() throws Exception {
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
dfs = cluster.getFileSystem();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
public void testGetFileChecksum(final Path foo, final int appendLength)
throws Exception {
final int appendRounds = 16;
FileChecksum[] fc = new FileChecksum[appendRounds + 1];
DFSTestUtil.createFile(dfs, foo, appendLength, REPLICATION, 0L);
fc[0] = dfs.getFileChecksum(foo);
for (int i = 0; i < appendRounds; i++) {
DFSTestUtil.appendFile(dfs, foo, appendLength);
fc[i + 1] = dfs.getFileChecksum(foo);
}
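// Verify that a range checksum over the first (i+1)*appendLength bytes
// matches the whole-file checksum recorded when the file had exactly that
// length, i.e. later appends do not perturb the checksums of earlier ranges.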
for (int i = 0; i < appendRounds + 1; i++) {
FileChecksum checksum = dfs.getFileChecksum(foo, appendLength * (i+1));
Assert.assertTrue(checksum.equals(fc[i]));
}
}
@Test
public void testGetFileChecksumForBlocksUnderConstruction() {
try {
FSDataOutputStream file = dfs.create(new Path("/testFile"));
file.write("Performance Testing".getBytes());
dfs.getFileChecksum(new Path("/testFile"));
fail("getFileChecksum should fail for files "
+ "with blocks under construction");
} catch (IOException ie) {
Assert.assertTrue(ie.getMessage().contains(
"Fail to get checksum, since file /testFile "
+ "is under construction."));
}
}
@Test
public void testGetFileChecksum() throws Exception {
testGetFileChecksum(new Path("/foo"), BLOCKSIZE / 4);
testGetFileChecksum(new Path("/bar"), BLOCKSIZE / 4 - 1);
}
}
| 3,189 | 32.578947 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.TreeMap;
import java.util.zip.CRC32;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Logger;
import org.junit.Test;
/**
 * This tests upgrading an HDFS cluster from images created by earlier
 * releases, and verifies the file system contents after the upgrade.
*
* This test uses the following items from src/test/.../dfs directory :
* 1) hadoop-22-dfs-dir.tgz and other tarred pre-upgrade NN / DN
* directory images
* 2) hadoop-dfs-dir.txt : checksums that are compared in this test.
* Please read hadoop-dfs-dir.txt for more information.
*/
public class TestDFSUpgradeFromImage {
private static final Log LOG = LogFactory
.getLog(TestDFSUpgradeFromImage.class);
private static final File TEST_ROOT_DIR =
new File(MiniDFSCluster.getBaseDirectory());
private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
private static final String HADOOP1_BBW_IMAGE = "hadoop1-bbw.tgz";
private static final String HADOOP1_RESERVED_IMAGE = "hadoop-1-reserved.tgz";
private static final String HADOOP023_RESERVED_IMAGE =
"hadoop-0.23-reserved.tgz";
private static final String HADOOP2_RESERVED_IMAGE = "hadoop-2-reserved.tgz";
private static class ReferenceFileInfo {
String path;
long checksum;
}
static final Configuration upgradeConf;
static {
upgradeConf = new HdfsConfiguration();
upgradeConf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off
if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Maven
System.setProperty("test.build.data", "build/test/data");
}
}
public interface ClusterVerifier {
public void verifyClusterPostUpgrade(final MiniDFSCluster cluster) throws IOException;
}
final LinkedList<ReferenceFileInfo> refList = new LinkedList<ReferenceFileInfo>();
Iterator<ReferenceFileInfo> refIter;
boolean printChecksum = false;
void unpackStorage(String tarFileName, String referenceName)
throws IOException {
String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+ "/" + tarFileName;
String dataDir = System.getProperty("test.build.data", "build/test/data");
File dfsDir = new File(dataDir, "dfs");
if ( dfsDir.exists() && !FileUtil.fullyDelete(dfsDir) ) {
throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
}
LOG.info("Unpacking " + tarFile);
FileUtil.unTar(new File(tarFile), new File(dataDir));
//Now read the reference info
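// The reference file is a whitespace-separated listing of
// "<path> <CRC32>" entries (plus a final "overallCRC" entry); lines starting
// with '#' are comments, and a line whose first token is "printChecksums"
// switches the test into checksum-printing mode instead of verifying.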
BufferedReader reader = new BufferedReader(new FileReader(
System.getProperty("test.cache.data", "build/test/cache")
+ "/" + referenceName));
String line;
while ( (line = reader.readLine()) != null ) {
line = line.trim();
if (line.length() <= 0 || line.startsWith("#")) {
continue;
}
String[] arr = line.split("\\s+");
if (arr.length < 1) {
continue;
}
if (arr[0].equals("printChecksums")) {
printChecksum = true;
break;
}
if (arr.length < 2) {
continue;
}
ReferenceFileInfo info = new ReferenceFileInfo();
info.path = arr[0];
info.checksum = Long.parseLong(arr[1]);
refList.add(info);
}
reader.close();
}
private void verifyChecksum(String path, long checksum) throws IOException {
if ( refIter == null ) {
refIter = refList.iterator();
}
if ( printChecksum ) {
LOG.info("CRC info for reference file : " + path + " \t " + checksum);
} else {
if ( !refIter.hasNext() ) {
throw new IOException("Checking checksum for " + path +
"Not enough elements in the refList");
}
ReferenceFileInfo info = refIter.next();
// The paths are expected to be listed in the same order
// as they are traversed here.
assertEquals(info.path, path);
assertEquals("Checking checksum for " + path, info.checksum, checksum);
}
}
/**
* Try to open a file for reading several times.
*
* If we fail because lease recovery hasn't completed, retry the open.
*/
private static FSInputStream dfsOpenFileWithRetries(DistributedFileSystem dfs,
String pathName) throws IOException {
IOException exc = null;
for (int tries = 0; tries < 10; tries++) {
try {
return dfs.dfs.open(pathName);
} catch (IOException e) {
exc = e;
}
if (!exc.getMessage().contains("Cannot obtain " +
"block length for LocatedBlock")) {
throw exc;
}
try {
Thread.sleep(1000);
} catch (InterruptedException ignored) {}
}
throw exc;
}
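// Recursively walks a directory tree in sorted order, folding each path name
// into the overall CRC and checking every file's CRC32 against the reference
// list via verifyChecksum().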
private void verifyDir(DistributedFileSystem dfs, Path dir,
CRC32 overallChecksum) throws IOException {
FileStatus[] fileArr = dfs.listStatus(dir);
TreeMap<Path, Boolean> fileMap = new TreeMap<Path, Boolean>();
for(FileStatus file : fileArr) {
fileMap.put(file.getPath(), Boolean.valueOf(file.isDirectory()));
}
for(Iterator<Path> it = fileMap.keySet().iterator(); it.hasNext();) {
Path path = it.next();
boolean isDir = fileMap.get(path);
String pathName = path.toUri().getPath();
overallChecksum.update(pathName.getBytes());
if ( isDir ) {
verifyDir(dfs, path, overallChecksum);
} else {
// this is not a directory. Checksum the file data.
CRC32 fileCRC = new CRC32();
FSInputStream in = dfsOpenFileWithRetries(dfs, pathName);
byte[] buf = new byte[4096];
int nRead = 0;
while ( (nRead = in.read(buf, 0, buf.length)) > 0 ) {
fileCRC.update(buf, 0, nRead);
}
verifyChecksum(pathName, fileCRC.getValue());
}
}
}
private void verifyFileSystem(DistributedFileSystem dfs) throws IOException {
CRC32 overallChecksum = new CRC32();
verifyDir(dfs, new Path("/"), overallChecksum);
verifyChecksum("overallCRC", overallChecksum.getValue());
if ( printChecksum ) {
throw new IOException("Checksums are written to log as requested. " +
"Throwing this exception to force an error " +
"for this test.");
}
}
/**
* Test that sets up a fake image from Hadoop 0.3.0 and tries to start a
* NN, verifying that the correct error message is thrown.
*/
@Test
public void testFailOnPreUpgradeImage() throws IOException {
Configuration conf = new HdfsConfiguration();
File namenodeStorage = new File(TEST_ROOT_DIR, "nnimage-0.3.0");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeStorage.toString());
// Set up a fake NN storage that looks like an ancient Hadoop dir circa 0.3.0
FileUtil.fullyDelete(namenodeStorage);
assertTrue("Make " + namenodeStorage, namenodeStorage.mkdirs());
File imageDir = new File(namenodeStorage, "image");
assertTrue("Make " + imageDir, imageDir.mkdirs());
// Hex dump of a formatted image from Hadoop 0.3.0
File imageFile = new File(imageDir, "fsimage");
byte[] imageBytes = StringUtils.hexStringToByte(
"fffffffee17c0d2700000000");
FileOutputStream fos = new FileOutputStream(imageFile);
try {
fos.write(imageBytes);
} finally {
fos.close();
}
// Now try to start an NN from it
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.startupOption(StartupOption.REGULAR)
.build();
fail("Was able to start NN from 0.3.0 image");
} catch (IOException ioe) {
if (!ioe.toString().contains("Old layout version is 'too old'")) {
throw ioe;
}
} finally {
// We expect startup to fail, but just in case it didn't, shutdown now.
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test upgrade from 0.22 image
*/
@Test
public void testUpgradeFromRel22Image() throws IOException {
unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT);
upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).
numDataNodes(4), null);
}
/**
 * Test upgrade from a 0.22 image with a corrupt md5, and make sure the
 * upgrade fails
*/
@Test
public void testUpgradeFromCorruptRel22Image() throws IOException {
unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT);
// Overwrite the md5 stored in the VERSION files
File baseDir = new File(MiniDFSCluster.getBaseDirectory());
FSImageTestUtil.corruptVersionFile(
new File(baseDir, "name1/current/VERSION"),
"imageMD5Digest", "22222222222222222222222222222222");
FSImageTestUtil.corruptVersionFile(
new File(baseDir, "name2/current/VERSION"),
"imageMD5Digest", "22222222222222222222222222222222");
// Attach our own log appender so we can verify output
final LogVerificationAppender appender = new LogVerificationAppender();
final Logger logger = Logger.getRootLogger();
logger.addAppender(appender);
// Upgrade should now fail
try {
upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).
numDataNodes(4), null);
fail("Upgrade did not fail with bad MD5");
} catch (IOException ioe) {
String msg = StringUtils.stringifyException(ioe);
if (!msg.contains("Failed to load an FSImage file")) {
throw ioe;
}
int md5failures = appender.countExceptionsWithMessage(
" is corrupt with MD5 checksum of ");
assertEquals("Upgrade did not fail with bad MD5", 1, md5failures);
}
}
/**
* Test upgrade from a branch-1.2 image with reserved paths
*/
@Test
public void testUpgradeFromRel1ReservedImage() throws Exception {
unpackStorage(HADOOP1_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
MiniDFSCluster cluster = null;
// Try it once without setting the upgrade flag to ensure it fails
final Configuration conf = new Configuration();
// Try it again with a custom rename string
try {
FSImageFormat.setRenameReservedPairs(
".snapshot=.user-snapshot," +
".reserved=.my-reserved");
cluster =
new MiniDFSCluster.Builder(conf)
.format(false)
.startupOption(StartupOption.UPGRADE)
.numDataNodes(0).build();
DistributedFileSystem dfs = cluster.getFileSystem();
// Make sure the paths were renamed as expected
// Also check that paths are present after a restart, checks that the
// upgraded fsimage has the same state.
final String[] expected = new String[] {
"/.my-reserved",
"/.user-snapshot",
"/.user-snapshot/.user-snapshot",
"/.user-snapshot/open",
"/dir1",
"/dir1/.user-snapshot",
"/dir2",
"/dir2/.user-snapshot",
"/user",
"/user/andrew",
"/user/andrew/.user-snapshot",
};
for (int i=0; i<2; i++) {
// Restart the second time through this loop
if (i==1) {
cluster.finalizeCluster(conf);
cluster.restartNameNode(true);
}
ArrayList<Path> toList = new ArrayList<Path>();
toList.add(new Path("/"));
ArrayList<String> found = new ArrayList<String>();
while (!toList.isEmpty()) {
Path p = toList.remove(0);
FileStatus[] statuses = dfs.listStatus(p);
for (FileStatus status: statuses) {
final String path = status.getPath().toUri().getPath();
System.out.println("Found path " + path);
found.add(path);
if (status.isDirectory()) {
toList.add(status.getPath());
}
}
}
for (String s: expected) {
assertTrue("Did not find expected path " + s, found.contains(s));
}
assertEquals("Found an unexpected path while listing filesystem",
found.size(), expected.length);
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test upgrade from a 0.23.11 image with reserved paths
*/
@Test
public void testUpgradeFromRel023ReservedImage() throws Exception {
unpackStorage(HADOOP023_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
MiniDFSCluster cluster = null;
// Try it once without setting the upgrade flag to ensure it fails
final Configuration conf = new Configuration();
// Try it again with a custom rename string
try {
FSImageFormat.setRenameReservedPairs(
".snapshot=.user-snapshot," +
".reserved=.my-reserved");
cluster =
new MiniDFSCluster.Builder(conf)
.format(false)
.startupOption(StartupOption.UPGRADE)
.numDataNodes(0).build();
DistributedFileSystem dfs = cluster.getFileSystem();
// Make sure the paths were renamed as expected
// Also check that paths are present after a restart, checks that the
// upgraded fsimage has the same state.
final String[] expected = new String[] {
"/.user-snapshot",
"/dir1",
"/dir1/.user-snapshot",
"/dir2",
"/dir2/.user-snapshot"
};
for (int i=0; i<2; i++) {
// Restart the second time through this loop
if (i==1) {
cluster.finalizeCluster(conf);
cluster.restartNameNode(true);
}
ArrayList<Path> toList = new ArrayList<Path>();
toList.add(new Path("/"));
ArrayList<String> found = new ArrayList<String>();
while (!toList.isEmpty()) {
Path p = toList.remove(0);
FileStatus[] statuses = dfs.listStatus(p);
for (FileStatus status: statuses) {
final String path = status.getPath().toUri().getPath();
System.out.println("Found path " + path);
found.add(path);
if (status.isDirectory()) {
toList.add(status.getPath());
}
}
}
for (String s: expected) {
assertTrue("Did not find expected path " + s, found.contains(s));
}
assertEquals("Found an unexpected path while listing filesystem",
found.size(), expected.length);
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test upgrade from 2.0 image with a variety of .snapshot and .reserved
* paths to test renaming on upgrade
*/
@Test
public void testUpgradeFromRel2ReservedImage() throws Exception {
unpackStorage(HADOOP2_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
MiniDFSCluster cluster = null;
// Try it once without setting the upgrade flag to ensure it fails
final Configuration conf = new Configuration();
try {
cluster =
new MiniDFSCluster.Builder(conf)
.format(false)
.startupOption(StartupOption.UPGRADE)
.numDataNodes(0).build();
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"reserved path component in this version",
e);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
// Try it again with a custom rename string
try {
FSImageFormat.setRenameReservedPairs(
".snapshot=.user-snapshot," +
".reserved=.my-reserved");
cluster =
new MiniDFSCluster.Builder(conf)
.format(false)
.startupOption(StartupOption.UPGRADE)
.numDataNodes(0).build();
DistributedFileSystem dfs = cluster.getFileSystem();
// Make sure the paths were renamed as expected
// Also check that paths are present after a restart, checks that the
// upgraded fsimage has the same state.
final String[] expected = new String[] {
"/edits",
"/edits/.reserved",
"/edits/.user-snapshot",
"/edits/.user-snapshot/editsdir",
"/edits/.user-snapshot/editsdir/editscontents",
"/edits/.user-snapshot/editsdir/editsdir2",
"/image",
"/image/.reserved",
"/image/.user-snapshot",
"/image/.user-snapshot/imagedir",
"/image/.user-snapshot/imagedir/imagecontents",
"/image/.user-snapshot/imagedir/imagedir2",
"/.my-reserved",
"/.my-reserved/edits-touch",
"/.my-reserved/image-touch"
};
for (int i=0; i<2; i++) {
// Restart the second time through this loop
if (i==1) {
cluster.finalizeCluster(conf);
cluster.restartNameNode(true);
}
ArrayList<Path> toList = new ArrayList<Path>();
toList.add(new Path("/"));
ArrayList<String> found = new ArrayList<String>();
while (!toList.isEmpty()) {
Path p = toList.remove(0);
FileStatus[] statuses = dfs.listStatus(p);
for (FileStatus status: statuses) {
final String path = status.getPath().toUri().getPath();
System.out.println("Found path " + path);
found.add(path);
if (status.isDirectory()) {
toList.add(status.getPath());
}
}
}
for (String s: expected) {
assertTrue("Did not find expected path " + s, found.contains(s));
}
assertEquals("Found an unexpected path while listing filesystem",
found.size(), expected.length);
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
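// Recursively triggers lease recovery on every file under the given path, so
// files that were left open in the pre-upgrade image can be read back during
// verification.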
static void recoverAllLeases(DFSClient dfs,
Path path) throws IOException {
String pathStr = path.toString();
HdfsFileStatus status = dfs.getFileInfo(pathStr);
if (!status.isDir()) {
dfs.recoverLease(pathStr);
return;
}
byte prev[] = HdfsFileStatus.EMPTY_NAME;
DirectoryListing dirList;
do {
dirList = dfs.listPaths(pathStr, prev);
HdfsFileStatus files[] = dirList.getPartialListing();
for (HdfsFileStatus f : files) {
recoverAllLeases(dfs, f.getFullPath(path));
}
prev = dirList.getLastName();
} while (dirList.hasMore());
}
void upgradeAndVerify(MiniDFSCluster.Builder bld, ClusterVerifier verifier)
throws IOException {
MiniDFSCluster cluster = null;
try {
bld.format(false).startupOption(StartupOption.UPGRADE)
.clusterId("testClusterId");
cluster = bld.build();
cluster.waitActive();
DistributedFileSystem dfs = cluster.getFileSystem();
DFSClient dfsClient = dfs.dfs;
//Safemode will be off only after upgrade is complete. Wait for it.
while ( dfsClient.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET) ) {
LOG.info("Waiting for SafeMode to be OFF.");
try {
Thread.sleep(1000);
} catch (InterruptedException ignored) {}
}
recoverAllLeases(dfsClient, new Path("/"));
verifyFileSystem(dfs);
if (verifier != null) {
verifier.verifyClusterPostUpgrade(cluster);
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
/**
* Test upgrade from a 1.x image with some blocksBeingWritten
*/
@Test
public void testUpgradeFromRel1BBWImage() throws IOException {
unpackStorage(HADOOP1_BBW_IMAGE, HADOOP_DFS_DIR_TXT);
Configuration conf = new Configuration(upgradeConf);
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
System.getProperty("test.build.data") + File.separator +
"dfs" + File.separator +
"data" + File.separator +
"data1");
upgradeAndVerify(new MiniDFSCluster.Builder(conf).
numDataNodes(1).enableManagedDfsDirsRedundancy(false).
manageDataDfsDirs(false), null);
}
}
| 22,034 | 34.3125 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeStartupFixesLegacyStorageIDs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.TestDFSUpgradeFromImage.ClusterVerifier;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
/**
* The test verifies that legacy storage IDs in older DataNode
* images are replaced with UUID-based storage IDs. The startup may
* or may not involve a Datanode Layout upgrade. Each test case uses
* the following resource files.
*
* 1. testCaseName.tgz - NN and DN directories corresponding
* to a specific layout version.
* 2. testCaseName.txt - Text file listing the checksum of each file
* in the cluster and overall checksum. See
* TestUpgradeFromImage for the file format.
*
* If any test case is renamed then the corresponding resource files must
* also be renamed.
*/
public class TestDatanodeStartupFixesLegacyStorageIDs {
/**
   * Perform an upgrade using the test image corresponding to
   * testCaseName.
*
* @param testCaseName
* @param expectedStorageId if null, then the upgrade generates a new
* unique storage ID.
* @throws IOException
*/
private static void runLayoutUpgradeTest(final String testCaseName,
final String expectedStorageId)
throws IOException {
TestDFSUpgradeFromImage upgrade = new TestDFSUpgradeFromImage();
upgrade.unpackStorage(testCaseName + ".tgz", testCaseName + ".txt");
Configuration conf = new Configuration(TestDFSUpgradeFromImage.upgradeConf);
initStorageDirs(conf, testCaseName);
upgradeAndVerify(upgrade, conf, new ClusterVerifier() {
@Override
public void verifyClusterPostUpgrade(MiniDFSCluster cluster) throws IOException {
// Verify that a GUID-based storage ID was generated.
final String bpid = cluster.getNamesystem().getBlockPoolId();
StorageReport[] reports =
cluster.getDataNodes().get(0).getFSDataset().getStorageReports(bpid);
assertThat(reports.length, is(1));
final String storageID = reports[0].getStorage().getStorageID();
assertTrue(DatanodeStorage.isValidStorageId(storageID));
if (expectedStorageId != null) {
assertThat(storageID, is(expectedStorageId));
}
}
});
}
private static void initStorageDirs(final Configuration conf,
final String testName) {
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
System.getProperty("test.build.data") + File.separator +
testName + File.separator + "dfs" + File.separator + "data");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
System.getProperty("test.build.data") + File.separator +
testName + File.separator + "dfs" + File.separator + "name");
}
private static void upgradeAndVerify(final TestDFSUpgradeFromImage upgrade,
final Configuration conf,
final ClusterVerifier verifier)
throws IOException{
upgrade.upgradeAndVerify(new MiniDFSCluster.Builder(conf)
.numDataNodes(1)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false), verifier);
}
/**
* Upgrade from 2.2 (no storage IDs per volume) correctly generates
* GUID-based storage IDs. Test case for HDFS-7575.
*/
@Test (timeout=300000)
public void testUpgradeFrom22FixesStorageIDs() throws IOException {
runLayoutUpgradeTest(GenericTestUtils.getMethodName(), null);
}
/**
* Startup from a 2.6-layout that has legacy storage IDs correctly
* generates new storage IDs.
* Test case for HDFS-7575.
*/
@Test (timeout=300000)
public void testUpgradeFrom22via26FixesStorageIDs() throws IOException {
runLayoutUpgradeTest(GenericTestUtils.getMethodName(), null);
}
/**
* Startup from a 2.6-layout that already has unique storage IDs does
* not regenerate the storage IDs.
* Test case for HDFS-7575.
*/
@Test (timeout=300000)
public void testUpgradeFrom26PreservesStorageIDs() throws IOException {
// StorageId present in the image testUpgradeFrom26PreservesStorageId.tgz
runLayoutUpgradeTest(GenericTestUtils.getMethodName(),
"DS-a0e39cfa-930f-4abd-813c-e22b59223774");
}
}
| 5,592 | 38.95 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Supplier;
public class TestDataTransferKeepalive {
final Configuration conf = new HdfsConfiguration();
private MiniDFSCluster cluster;
private DataNode dn;
private static final Path TEST_FILE = new Path("/test");
private static final int KEEPALIVE_TIMEOUT = 1000;
private static final int WRITE_TIMEOUT = 3000;
@Before
public void setup() throws Exception {
conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
KEEPALIVE_TIMEOUT);
conf.setInt(DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
0);
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1).build();
dn = cluster.getDataNodes().get(0);
}
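// The keepalive is shortened so server-side xceivers expire quickly, and
// block-acquire retries are disabled so that (presumably) any failure to
// cope with a dead cached socket shows up as a test failure instead of being
// papered over by retries.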
@After
public void teardown() {
cluster.shutdown();
}
/**
* Regression test for HDFS-3357. Check that the datanode is respecting
* its configured keepalive timeout.
*/
@Test(timeout=30000)
public void testDatanodeRespectsKeepAliveTimeout() throws Exception {
Configuration clientConf = new Configuration(conf);
// Set a client socket cache expiry time much longer than
// the datanode-side expiration time.
final long CLIENT_EXPIRY_MS = 60000L;
clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
clientConf.set(DFS_CLIENT_CONTEXT, "testDatanodeRespectsKeepAliveTimeout");
DistributedFileSystem fs =
(DistributedFileSystem)FileSystem.get(cluster.getURI(),
clientConf);
PeerCache peerCache = ClientContext.getFromConf(clientConf).getPeerCache();
DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);
// Connections opened by writers aren't currently cached for re-use.
assertEquals(0, peerCache.size());
assertXceiverCount(0);
// Reads the file, so we should get a
// cached socket, and should have an xceiver on the other side.
DFSTestUtil.readFile(fs, TEST_FILE);
assertEquals(1, peerCache.size());
assertXceiverCount(1);
// Sleep for a bit longer than the keepalive timeout
// and make sure the xceiver died.
Thread.sleep(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT + 50);
assertXceiverCount(0);
// The socket is still in the cache, because we don't
// notice that it's closed until we try to read
// from it again.
assertEquals(1, peerCache.size());
// Take it out of the cache - reading should
// give an EOF.
Peer peer = peerCache.get(dn.getDatanodeId(), false);
assertNotNull(peer);
assertEquals(-1, peer.getInputStream().read());
}
/**
* Test that the client respects its keepalive timeout.
*/
@Test(timeout=30000)
public void testClientResponsesKeepAliveTimeout() throws Exception {
Configuration clientConf = new Configuration(conf);
// Set a client socket cache expiry time much shorter than
// the datanode-side expiration time.
final long CLIENT_EXPIRY_MS = 10L;
clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
clientConf.set(DFS_CLIENT_CONTEXT, "testClientResponsesKeepAliveTimeout");
DistributedFileSystem fs =
(DistributedFileSystem)FileSystem.get(cluster.getURI(),
clientConf);
PeerCache peerCache = ClientContext.getFromConf(clientConf).getPeerCache();
DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);
// Connections opened by writers aren't currently cached for re-use.
assertEquals(0, peerCache.size());
assertXceiverCount(0);
// Reads the file, so we should get a
// cached socket, and should have an xceiver on the other side.
DFSTestUtil.readFile(fs, TEST_FILE);
assertEquals(1, peerCache.size());
assertXceiverCount(1);
// Sleep for a bit longer than the client keepalive timeout.
Thread.sleep(CLIENT_EXPIRY_MS + 50);
// Taking out a peer which is expired should give a null.
Peer peer = peerCache.get(dn.getDatanodeId(), false);
assertTrue(peer == null);
// The socket cache is now empty.
assertEquals(0, peerCache.size());
}
/**
   * Test for the case where the client begins to read a long block, but doesn't
* read bytes off the stream quickly. The datanode should time out sending the
* chunks and the transceiver should die, even if it has a long keepalive.
*/
@Test(timeout=300000)
public void testSlowReader() throws Exception {
// Set a client socket cache expiry time much longer than
// the datanode-side expiration time.
final long CLIENT_EXPIRY_MS = 600000L;
Configuration clientConf = new Configuration(conf);
clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
clientConf.set(DFS_CLIENT_CONTEXT, "testSlowReader");
DistributedFileSystem fs =
(DistributedFileSystem)FileSystem.get(cluster.getURI(),
clientConf);
// Restart the DN with a shorter write timeout.
DataNodeProperties props = cluster.stopDataNode(0);
props.conf.setInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
WRITE_TIMEOUT);
props.conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
120000);
assertTrue(cluster.restartDataNode(props, true));
dn = cluster.getDataNodes().get(0);
// Wait for heartbeats to avoid a startup race where we
// try to write the block while the DN is still starting.
cluster.triggerHeartbeats();
DFSTestUtil.createFile(fs, TEST_FILE, 1024*1024*8L, (short)1, 0L);
FSDataInputStream stm = fs.open(TEST_FILE);
stm.read();
assertXceiverCount(1);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
public Boolean get() {
// DN should time out in sendChunks, and this should force
// the xceiver to exit.
return getXceiverCountWithoutServer() == 0;
}
}, 500, 50000);
IOUtils.closeStream(stm);
}
@Test(timeout=30000)
public void testManyClosedSocketsInCache() throws Exception {
// Make a small file
Configuration clientConf = new Configuration(conf);
clientConf.set(DFS_CLIENT_CONTEXT, "testManyClosedSocketsInCache");
DistributedFileSystem fs =
(DistributedFileSystem)FileSystem.get(cluster.getURI(),
clientConf);
PeerCache peerCache = ClientContext.getFromConf(clientConf).getPeerCache();
DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);
// Insert a bunch of dead sockets in the cache, by opening
// many streams concurrently, reading all of the data,
// and then closing them.
InputStream[] stms = new InputStream[5];
try {
for (int i = 0; i < stms.length; i++) {
stms[i] = fs.open(TEST_FILE);
}
for (InputStream stm : stms) {
IOUtils.copyBytes(stm, new IOUtils.NullOutputStream(), 1024);
}
} finally {
IOUtils.cleanup(null, stms);
}
assertEquals(5, peerCache.size());
// Let all the xceivers timeout
Thread.sleep(1500);
assertXceiverCount(0);
// Client side still has the sockets cached
assertEquals(5, peerCache.size());
// Reading should not throw an exception.
DFSTestUtil.readFile(fs, TEST_FILE);
}
private void assertXceiverCount(int expected) {
int count = getXceiverCountWithoutServer();
if (count != expected) {
ReflectionUtils.printThreadInfo(System.err, "Thread dumps");
fail("Expected " + expected + " xceivers, found " +
count);
}
}
/**
* Returns the datanode's xceiver count, but subtracts 1, since the
* DataXceiverServer counts as one.
*
* @return int xceiver count, not including DataXceiverServer
*/
private int getXceiverCountWithoutServer() {
return dn.getXceiverCount() - 1;
}
}
| 9,743 | 36.19084 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy;
import org.apache.hadoop.io.IOUtils;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
/**
 * This class tests replacing a datanode in the write pipeline when one
 * fails, i.e. the client-side ReplaceDatanodeOnFailure handling.
*/
public class TestReplaceDatanodeOnFailure {
static final Log LOG = AppendTestUtil.LOG;
static final String DIR = "/" + TestReplaceDatanodeOnFailure.class.getSimpleName() + "/";
static final short REPLICATION = 3;
final private static String RACK0 = "/rack0";
final private static String RACK1 = "/rack1";
{
((Log4JLogger)DataTransferProtocol.LOG).getLogger().setLevel(Level.ALL);
}
/** Test DEFAULT ReplaceDatanodeOnFailure policy. */
@Test
public void testDefaultPolicy() throws Exception {
final Configuration conf = new HdfsConfiguration();
final ReplaceDatanodeOnFailure p = ReplaceDatanodeOnFailure.get(conf);
final DatanodeInfo[] infos = new DatanodeInfo[5];
final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
datanodes[0] = new DatanodeInfo[0];
for(int i = 0; i < infos.length; ) {
infos[i] = DFSTestUtil.getLocalDatanodeInfo(50020 + i);
i++;
datanodes[i] = new DatanodeInfo[i];
System.arraycopy(infos, 0, datanodes[i], 0, datanodes[i].length);
}
final boolean[] isAppend = {true, true, false, false};
final boolean[] isHflushed = {true, false, true, false};
for(short replication = 1; replication <= infos.length; replication++) {
for(int nExistings = 0; nExistings < datanodes.length; nExistings++) {
final DatanodeInfo[] existings = datanodes[nExistings];
Assert.assertEquals(nExistings, existings.length);
for(int i = 0; i < isAppend.length; i++) {
for(int j = 0; j < isHflushed.length; j++) {
final int half = replication/2;
final boolean enoughReplica = replication <= nExistings;
final boolean noReplica = nExistings == 0;
final boolean replicationL3 = replication < 3;
final boolean existingsLEhalf = nExistings <= half;
final boolean isAH = isAppend[i] || isHflushed[j];
final boolean expected;
if (enoughReplica || noReplica || replicationL3) {
expected = false;
} else {
expected = isAH || existingsLEhalf;
}
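            // In other words, the default policy asks for a replacement only
            // when replication >= 3, the pipeline still has at least one
            // datanode but fewer than the target replication, and either the
            // stream was appended/hflushed or it has shrunk to
            // replication/2 or fewer nodes.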
final boolean computed = p.satisfy(
replication, existings, isAppend[i], isHflushed[j]);
try {
Assert.assertEquals(expected, computed);
} catch(AssertionError e) {
final String s = "replication=" + replication
+ "\nnExistings =" + nExistings
+ "\nisAppend =" + isAppend[i]
+ "\nisHflushed =" + isHflushed[j];
throw new RuntimeException(s, e);
}
}
}
}
}
}
/** Test replace datanode on failure. */
@Test
public void testReplaceDatanodeOnFailure() throws Exception {
final Configuration conf = new HdfsConfiguration();
//always replace a datanode
ReplaceDatanodeOnFailure.write(Policy.ALWAYS, true, conf);
final String[] racks = new String[REPLICATION];
Arrays.fill(racks, RACK0);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
).racks(racks).numDataNodes(REPLICATION).build();
try {
final DistributedFileSystem fs = cluster.getFileSystem();
final Path dir = new Path(DIR);
final SlowWriter[] slowwriters = new SlowWriter[10];
for(int i = 1; i <= slowwriters.length; i++) {
//create slow writers at different speeds
slowwriters[i - 1] = new SlowWriter(fs, new Path(dir, "file" + i), i*200L);
}
for(SlowWriter s : slowwriters) {
s.start();
}
// Let slow writers write something.
// Some of them are so slow that they will not have started writing yet.
sleepSeconds(1);
//start new datanodes
cluster.startDataNodes(conf, 2, true, null, new String[]{RACK1, RACK1});
//stop an old datanode
cluster.stopDataNode(AppendTestUtil.nextInt(REPLICATION));
//Let the slow writers write for a few more seconds.
//Everyone should have written something.
sleepSeconds(5);
//check replication and interrupt.
for(SlowWriter s : slowwriters) {
s.checkReplication();
s.interruptRunning();
}
//close files
for(SlowWriter s : slowwriters) {
s.joinAndClose();
}
//Verify the file
LOG.info("Verify the file");
for(int i = 0; i < slowwriters.length; i++) {
LOG.info(slowwriters[i].filepath + ": length="
+ fs.getFileStatus(slowwriters[i].filepath).getLen());
FSDataInputStream in = null;
try {
in = fs.open(slowwriters[i].filepath);
for(int j = 0, x; (x = in.read()) != -1; j++) {
Assert.assertEquals(j, x);
}
}
finally {
IOUtils.closeStream(in);
}
}
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
static void sleepSeconds(final int waittime) throws InterruptedException {
LOG.info("Wait " + waittime + " seconds");
Thread.sleep(waittime * 1000L);
}
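// A writer thread that writes one byte and hflushes it every sleepms
// milliseconds, keeping its pipeline open long enough for datanode
// replacement to kick in while the test shuffles datanodes around.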
static class SlowWriter extends Thread {
final Path filepath;
final HdfsDataOutputStream out;
final long sleepms;
private volatile boolean running = true;
SlowWriter(DistributedFileSystem fs, Path filepath, final long sleepms
) throws IOException {
super(SlowWriter.class.getSimpleName() + ":" + filepath);
this.filepath = filepath;
this.out = (HdfsDataOutputStream)fs.create(filepath, REPLICATION);
this.sleepms = sleepms;
}
@Override
public void run() {
int i = 0;
try {
sleep(sleepms);
for(; running; i++) {
LOG.info(getName() + " writes " + i);
out.write(i);
out.hflush();
sleep(sleepms);
}
} catch(InterruptedException e) {
LOG.info(getName() + " interrupted:" + e);
} catch(IOException e) {
throw new RuntimeException(getName(), e);
} finally {
LOG.info(getName() + " terminated: i=" + i);
}
}
void interruptRunning() {
running = false;
interrupt();
}
void joinAndClose() throws InterruptedException {
LOG.info(getName() + " join and close");
join();
IOUtils.closeStream(out);
}
void checkReplication() throws IOException {
Assert.assertEquals(REPLICATION, out.getCurrentBlockReplication());
}
}
@Test
public void testAppend() throws Exception {
final Configuration conf = new HdfsConfiguration();
final short REPLICATION = (short)3;
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
).numDataNodes(1).build();
try {
final DistributedFileSystem fs = cluster.getFileSystem();
final Path f = new Path(DIR, "testAppend");
{
LOG.info("create an empty file " + f);
fs.create(f, REPLICATION).close();
final FileStatus status = fs.getFileStatus(f);
Assert.assertEquals(REPLICATION, status.getReplication());
Assert.assertEquals(0L, status.getLen());
}
final byte[] bytes = new byte[1000];
{
LOG.info("append " + bytes.length + " bytes to " + f);
final FSDataOutputStream out = fs.append(f);
out.write(bytes);
out.close();
final FileStatus status = fs.getFileStatus(f);
Assert.assertEquals(REPLICATION, status.getReplication());
Assert.assertEquals(bytes.length, status.getLen());
}
{
LOG.info("append another " + bytes.length + " bytes to " + f);
try {
final FSDataOutputStream out = fs.append(f);
out.write(bytes);
out.close();
Assert.fail();
} catch(IOException ioe) {
LOG.info("This exception is expected", ioe);
}
}
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
@Test
public void testBestEffort() throws Exception {
final Configuration conf = new HdfsConfiguration();
//always replace a datanode but do not throw exception
ReplaceDatanodeOnFailure.write(Policy.ALWAYS, true, conf);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
).numDataNodes(1).build();
try {
final DistributedFileSystem fs = cluster.getFileSystem();
final Path f = new Path(DIR, "testIgnoreReplaceFailure");
final byte[] bytes = new byte[1000];
{
LOG.info("write " + bytes.length + " bytes to " + f);
final FSDataOutputStream out = fs.create(f, REPLICATION);
out.write(bytes);
out.close();
final FileStatus status = fs.getFileStatus(f);
Assert.assertEquals(REPLICATION, status.getReplication());
Assert.assertEquals(bytes.length, status.getLen());
}
{
LOG.info("append another " + bytes.length + " bytes to " + f);
final FSDataOutputStream out = fs.append(f);
out.write(bytes);
out.close();
}
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
}
| 10,940 | 32.458716 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRemoteBlockReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
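/**
 * Runs the shared {@link TestBlockReaderBase} tests against the legacy
 * RemoteBlockReader implementation by enabling
 * DFS_CLIENT_USE_LEGACY_BLOCKREADER in the configuration.
 */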
public class TestRemoteBlockReader extends TestBlockReaderBase {
HdfsConfiguration createConf() {
HdfsConfiguration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
return conf;
}
}
| 1,094 | 38.107143 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestKeyProviderCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.net.URI;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.junit.Assert;
import org.junit.Test;
public class TestKeyProviderCache {
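  // A no-op KeyProvider plus a factory for the test-only "dummy://" scheme;
  // the provider does nothing and exists only so the cache has distinct
  // instances to construct and hand out.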
public static class DummyKeyProvider extends KeyProvider {
public DummyKeyProvider(Configuration conf) {
super(conf);
}
@Override
public KeyVersion getKeyVersion(String versionName) throws IOException {
return null;
}
@Override
public List<String> getKeys() throws IOException {
return null;
}
@Override
public List<KeyVersion> getKeyVersions(String name) throws IOException {
return null;
}
@Override
public Metadata getMetadata(String name) throws IOException {
return null;
}
@Override
public KeyVersion createKey(String name, byte[] material, Options options)
throws IOException {
return null;
}
@Override
public void deleteKey(String name) throws IOException {
}
@Override
public KeyVersion rollNewVersion(String name, byte[] material)
throws IOException {
return null;
}
@Override
public void flush() throws IOException {
}
}
public static class Factory extends KeyProviderFactory {
@Override
public KeyProvider createProvider(URI providerName, Configuration conf)
throws IOException {
if ("dummy".equals(providerName.getScheme())) {
return new DummyKeyProvider(conf);
}
return null;
}
}
@Test
public void testCache() throws Exception {
KeyProviderCache kpCache = new KeyProviderCache(10000);
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
"dummy://foo:bar@test_provider1");
KeyProvider keyProvider1 = kpCache.get(conf);
Assert.assertNotNull("Returned Key Provider is null !!", keyProvider1);
conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
"dummy://foo:bar@test_provider1");
KeyProvider keyProvider2 = kpCache.get(conf);
Assert.assertTrue("Different KeyProviders returned !!",
keyProvider1 == keyProvider2);
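    // The cache is keyed on the full provider URI, so the identical URI above
    // returns the cached instance, while a different authority or different
    // userinfo below produces a distinct provider.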
conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
"dummy://test_provider3");
KeyProvider keyProvider3 = kpCache.get(conf);
Assert.assertFalse("Same KeyProviders returned !!",
keyProvider1 == keyProvider3);
conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
"dummy://hello:there@test_provider1");
KeyProvider keyProvider4 = kpCache.get(conf);
Assert.assertFalse("Same KeyProviders returned !!",
keyProvider1 == keyProvider4);
}
}
| 3,676 | 28.416 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.io.OutputStream;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferServer;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.mockito.Mockito;
@RunWith(Parameterized.class)
public class TestEncryptedTransfer {
{
LogManager.getLogger(SaslDataTransferServer.class).setLevel(Level.DEBUG);
LogManager.getLogger(DataTransferSaslUtil.class).setLevel(Level.DEBUG);
}
@Parameters
public static Collection<Object[]> data() {
Collection<Object[]> params = new ArrayList<Object[]>();
params.add(new Object[]{null});
params.add(new Object[]{"org.apache.hadoop.hdfs.TestEncryptedTransfer$TestTrustedChannelResolver"});
return params;
}
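  // The suite runs twice: once with no TrustedChannelResolver configured and
  // once with a test resolver that (presumably) treats the channel as
  // trusted, which is why some of the cipher-negotiation log assertions below
  // are guarded by resolverClazz == null.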
private static final Log LOG = LogFactory.getLog(TestEncryptedTransfer.class);
private static final String PLAIN_TEXT = "this is very secret plain text";
private static final Path TEST_PATH = new Path("/non-encrypted-file");
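  /**
   * Enable encrypted data transfer and block access tokens on the given conf,
   * and plug in the TrustedChannelResolver class when this parameterized run
   * supplies one.
   */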
private void setEncryptionConfigKeys(Configuration conf) {
conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
if (resolverClazz != null){
conf.set(DFSConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS, resolverClazz);
}
}
// Unset DFS_ENCRYPT_DATA_TRANSFER_KEY and DFS_DATA_ENCRYPTION_ALGORITHM_KEY
// on the client side to ensure that clients will detect this setting
// automatically from the NN.
private static FileSystem getFileSystem(Configuration conf) throws IOException {
Configuration localConf = new Configuration(conf);
localConf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, false);
localConf.unset(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
return FileSystem.get(localConf);
}
String resolverClazz;
public TestEncryptedTransfer(String resolverClazz){
this.resolverClazz = resolverClazz;
}
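  /**
   * Write a file while encryption is disabled, then restart the cluster with
   * encrypted data transfer enabled and verify that the same contents and
   * checksum can still be read. With no cipher suite configured, the logs
   * should show no AES crypto streams being negotiated.
   */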
@Test
public void testEncryptedRead() throws IOException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = getFileSystem(conf);
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
fs.close();
cluster.shutdown();
setEncryptionConfigKeys(conf);
cluster = new MiniDFSCluster.Builder(conf)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.format(false)
.startupOption(StartupOption.REGULAR)
.build();
fs = getFileSystem(conf);
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(SaslDataTransferServer.class));
LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(DataTransferSaslUtil.class));
try {
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
} finally {
logs.stopCapturing();
logs1.stopCapturing();
}
fs.close();
if (resolverClazz == null) {
// Test client and server negotiate cipher option
GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
"Server using cipher suite");
// Check the IOStreamPair
GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
"Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
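  /**
   * Same flow as {@link #testEncryptedRead()}, but with the data encryption
   * algorithm explicitly set to rc4 instead of the default 3des.
   */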
@Test
public void testEncryptedReadWithRC4() throws IOException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = getFileSystem(conf);
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
fs.close();
cluster.shutdown();
setEncryptionConfigKeys(conf);
// It'll use 3DES by default, but we set it to rc4 here.
conf.set(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY, "rc4");
cluster = new MiniDFSCluster.Builder(conf)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.format(false)
.startupOption(StartupOption.REGULAR)
.build();
fs = getFileSystem(conf);
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(SaslDataTransferServer.class));
LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(DataTransferSaslUtil.class));
try {
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
} finally {
logs.stopCapturing();
logs1.stopCapturing();
}
fs.close();
if (resolverClazz == null) {
// Test client and server negotiate cipher option
GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
"Server using cipher suite");
// Check the IOStreamPair
GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
"Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
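  /**
   * Encrypted read with the AES/CTR/NoPadding cipher suite configured; here
   * the log assertions expect the server to report the negotiated cipher
   * suite and the creation of crypto streams.
   */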
@Test
public void testEncryptedReadWithAES() throws IOException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY,
"AES/CTR/NoPadding");
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = getFileSystem(conf);
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
fs.close();
cluster.shutdown();
setEncryptionConfigKeys(conf);
cluster = new MiniDFSCluster.Builder(conf)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.format(false)
.startupOption(StartupOption.REGULAR)
.build();
fs = getFileSystem(conf);
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(SaslDataTransferServer.class));
LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(DataTransferSaslUtil.class));
try {
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
} finally {
logs.stopCapturing();
logs1.stopCapturing();
}
fs.close();
if (resolverClazz == null) {
// Test client and server negotiate cipher option
GenericTestUtils.assertMatches(logs.getOutput(),
"Server using cipher suite");
// Check the IOStreamPair
GenericTestUtils.assertMatches(logs1.getOutput(),
"Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
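  /**
   * Verify that encrypted reads keep working after the NameNode is restarted.
   */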
@Test
public void testEncryptedReadAfterNameNodeRestart() throws IOException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = getFileSystem(conf);
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
fs.close();
cluster.shutdown();
setEncryptionConfigKeys(conf);
cluster = new MiniDFSCluster.Builder(conf)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.format(false)
.startupOption(StartupOption.REGULAR)
.build();
fs = getFileSystem(conf);
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
fs.close();
cluster.restartNameNode();
fs = getFileSystem(conf);
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
fs.close();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
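  /**
   * Spy on the DFSClient so it claims data does not need to be encrypted and
   * verify that the DataNode rejects the handshake; the read only succeeds
   * when a trusted channel resolver is configured.
   */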
@Test
public void testClientThatDoesNotSupportEncryption() throws IOException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
// Set short retry timeouts so this test runs faster
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = getFileSystem(conf);
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
fs.close();
cluster.shutdown();
setEncryptionConfigKeys(conf);
cluster = new MiniDFSCluster.Builder(conf)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.format(false)
.startupOption(StartupOption.REGULAR)
.build();
fs = getFileSystem(conf);
DFSClient client = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
DFSClient spyClient = Mockito.spy(client);
Mockito.doReturn(false).when(spyClient).shouldEncryptData();
DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyClient);
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(DataNode.class));
try {
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")){
fail("Should not have been able to read without encryption enabled.");
}
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("Could not obtain block:",
ioe);
} finally {
logs.stopCapturing();
}
fs.close();
if (resolverClazz == null) {
GenericTestUtils.assertMatches(logs.getOutput(),
"Failed to read expected encryption handshake from client at");
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
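  /**
   * A client created before a NameNode/DataNode restart (which invalidates
   * its cached data encryption key) should still be able to read.
   */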
@Test
public void testLongLivedReadClientAfterRestart() throws IOException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = getFileSystem(conf);
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
fs.close();
cluster.shutdown();
setEncryptionConfigKeys(conf);
cluster = new MiniDFSCluster.Builder(conf)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.format(false)
.startupOption(StartupOption.REGULAR)
.build();
fs = getFileSystem(conf);
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
// Restart the NN and DN, after which the client's encryption key will no
// longer be valid.
cluster.restartNameNode();
assertTrue(cluster.restartDataNode(0));
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
fs.close();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
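  /**
   * Like the read case above, but the long-lived client keeps writing
   * (appending) across a NameNode and DataNode restart.
   */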
@Test
public void testLongLivedWriteClientAfterRestart() throws IOException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
setEncryptionConfigKeys(conf);
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = getFileSystem(conf);
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
// Restart the NN and DN, after which the client's encryption key will no
// longer be valid.
cluster.restartNameNode();
assertTrue(cluster.restartDataNodes());
cluster.waitActive();
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT + PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
fs.close();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
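  /**
   * Shrink the block token lifetime, sleep past the encryption key expiry and
   * verify that the client can still read.
   */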
@Test
public void testLongLivedClient() throws IOException, InterruptedException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = getFileSystem(conf);
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
fs.close();
cluster.shutdown();
setEncryptionConfigKeys(conf);
cluster = new MiniDFSCluster.Builder(conf)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.format(false)
.startupOption(StartupOption.REGULAR)
.build();
BlockTokenSecretManager btsm = cluster.getNamesystem().getBlockManager()
.getBlockTokenSecretManager();
btsm.setKeyUpdateIntervalForTesting(2 * 1000);
btsm.setTokenLifetime(2 * 1000);
btsm.clearAllKeysForTesting();
fs = getFileSystem(conf);
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
// Sleep for 15 seconds, after which the encryption key will no longer be
// valid. It needs to be a few multiples of the block token lifetime,
// since several block tokens are valid at any given time (the current
// and the last two, by default.)
LOG.info("Sleeping so that encryption keys expire...");
Thread.sleep(15 * 1000);
LOG.info("Done sleeping.");
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
fs.close();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
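  // The next three tests exercise encrypted writes against clusters of
  // 1, 2 and 10 DataNodes.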
@Test
public void testEncryptedWriteWithOneDn() throws IOException {
testEncryptedWrite(1);
}
@Test
public void testEncryptedWriteWithTwoDns() throws IOException {
testEncryptedWrite(2);
}
@Test
public void testEncryptedWriteWithMultipleDns() throws IOException {
testEncryptedWrite(10);
}
private void testEncryptedWrite(int numDns) throws IOException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
setEncryptionConfigKeys(conf);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDns).build();
FileSystem fs = getFileSystem(conf);
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(SaslDataTransferServer.class));
LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(DataTransferSaslUtil.class));
try {
writeTestDataToFile(fs);
} finally {
logs.stopCapturing();
logs1.stopCapturing();
}
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
fs.close();
if (resolverClazz == null) {
// Test client and server negotiate cipher option
GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
"Server using cipher suite");
// Check the IOStreamPair
GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
"Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testEncryptedAppend() throws IOException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
setEncryptionConfigKeys(conf);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = getFileSystem(conf);
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT + PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
fs.close();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testEncryptedAppendRequiringBlockTransfer() throws IOException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
setEncryptionConfigKeys(conf);
// start up 4 DNs
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
FileSystem fs = getFileSystem(conf);
// Create a file with replication 3, so its block is on 3 / 4 DNs.
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
// Shut down one of the DNs holding a block replica.
FSDataInputStream in = fs.open(TEST_PATH);
List<LocatedBlock> locatedBlocks = DFSTestUtil.getAllBlocks(in);
in.close();
assertEquals(1, locatedBlocks.size());
assertEquals(3, locatedBlocks.get(0).getLocations().length);
DataNode dn = cluster.getDataNode(locatedBlocks.get(0).getLocations()[0].getIpcPort());
dn.shutdown();
// Reopen the file for append, which will need to add another DN to the
// pipeline and in doing so trigger a block transfer.
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT + PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
fs.close();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
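  /**
   * Create TEST_PATH if it does not exist yet, otherwise append to it,
   * writing PLAIN_TEXT once per call.
   */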
private static void writeTestDataToFile(FileSystem fs) throws IOException {
OutputStream out = null;
if (!fs.exists(TEST_PATH)) {
out = fs.create(TEST_PATH);
} else {
out = fs.append(TEST_PATH);
}
out.write(PLAIN_TEXT.getBytes());
out.close();
}
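  /**
   * Resolver used by the parameterized runs that treats every peer as
   * trusted.
   */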
static class TestTrustedChannelResolver extends TrustedChannelResolver {
public boolean isTrusted(){
return true;
}
public boolean isTrusted(InetAddress peerAddress){
return true;
}
}
}
| 21,104 | 32.876404 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.EnumSet;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.io.IOUtils;
import org.apache.log4j.Level;
import org.junit.Test;
/** This class contains a set of tests to verify the correctness of the
 * newly introduced {@link FSDataOutputStream#hflush()} method. */
public class TestHFlush {
{
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
}
private final String fName = "hflushtest.dat";
/**
* The test uses
* {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
* to write a file with a standard block size
*/
@Test
public void hFlush_01() throws IOException {
doTheJob(new HdfsConfiguration(), fName, AppendTestUtil.BLOCK_SIZE,
(short) 2, false, EnumSet.noneOf(SyncFlag.class));
}
/**
* The test uses
* {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
* to write a file with a custom block size so the writes will be
   * happening across block boundaries
*/
@Test
public void hFlush_02() throws IOException {
Configuration conf = new HdfsConfiguration();
int customPerChecksumSize = 512;
int customBlockSize = customPerChecksumSize * 3;
    // Modify default filesystem settings
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
doTheJob(conf, fName, customBlockSize, (short) 2, false,
EnumSet.noneOf(SyncFlag.class));
}
/**
* The test uses
* {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
* to write a file with a custom block size so the writes will be
   * happening across block and checksum boundaries
*/
@Test
public void hFlush_03() throws IOException {
Configuration conf = new HdfsConfiguration();
int customPerChecksumSize = 400;
int customBlockSize = customPerChecksumSize * 3;
    // Modify default filesystem settings
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
doTheJob(conf, fName, customBlockSize, (short) 2, false,
EnumSet.noneOf(SyncFlag.class));
}
/**
* Test hsync (with updating block length in NameNode) while no data is
* actually written yet
*/
@Test
public void hSyncUpdateLength_00() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
2).build();
DistributedFileSystem fileSystem =
cluster.getFileSystem();
try {
Path path = new Path(fName);
FSDataOutputStream stm = fileSystem.create(path, true, 4096, (short) 2,
AppendTestUtil.BLOCK_SIZE);
System.out.println("Created file " + path.toString());
((DFSOutputStream) stm.getWrappedStream()).hsync(EnumSet
.of(SyncFlag.UPDATE_LENGTH));
long currentFileLength = fileSystem.getFileStatus(path).getLen();
assertEquals(0L, currentFileLength);
stm.close();
} finally {
fileSystem.close();
cluster.shutdown();
}
}
/**
* Test hsync with END_BLOCK flag.
*/
@Test
public void hSyncEndBlock_00() throws IOException {
final int preferredBlockSize = 1024;
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, preferredBlockSize);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
.build();
DistributedFileSystem fileSystem = cluster.getFileSystem();
FSDataOutputStream stm = null;
try {
Path path = new Path("/" + fName);
stm = fileSystem.create(path, true, 4096, (short) 2,
AppendTestUtil.BLOCK_SIZE);
System.out.println("Created file " + path.toString());
((DFSOutputStream) stm.getWrappedStream()).hsync(EnumSet
.of(SyncFlag.END_BLOCK));
long currentFileLength = fileSystem.getFileStatus(path).getLen();
assertEquals(0L, currentFileLength);
LocatedBlocks blocks = fileSystem.dfs.getLocatedBlocks(path.toString(), 0);
assertEquals(0, blocks.getLocatedBlocks().size());
// write a block and call hsync(end_block) at the block boundary
stm.write(new byte[preferredBlockSize]);
((DFSOutputStream) stm.getWrappedStream()).hsync(EnumSet
.of(SyncFlag.END_BLOCK));
currentFileLength = fileSystem.getFileStatus(path).getLen();
assertEquals(preferredBlockSize, currentFileLength);
blocks = fileSystem.dfs.getLocatedBlocks(path.toString(), 0);
assertEquals(1, blocks.getLocatedBlocks().size());
// call hsync then call hsync(end_block) immediately
stm.write(new byte[preferredBlockSize / 2]);
stm.hsync();
((DFSOutputStream) stm.getWrappedStream()).hsync(EnumSet
.of(SyncFlag.END_BLOCK));
currentFileLength = fileSystem.getFileStatus(path).getLen();
assertEquals(preferredBlockSize + preferredBlockSize / 2,
currentFileLength);
blocks = fileSystem.dfs.getLocatedBlocks(path.toString(), 0);
assertEquals(2, blocks.getLocatedBlocks().size());
stm.write(new byte[preferredBlockSize / 4]);
stm.hsync();
currentFileLength = fileSystem.getFileStatus(path).getLen();
assertEquals(preferredBlockSize + preferredBlockSize / 2
+ preferredBlockSize / 4, currentFileLength);
blocks = fileSystem.dfs.getLocatedBlocks(path.toString(), 0);
assertEquals(3, blocks.getLocatedBlocks().size());
} finally {
IOUtils.cleanup(null, stm, fileSystem);
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* The test calls
* {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
* while requiring the semantic of {@link SyncFlag#UPDATE_LENGTH}.
*/
@Test
public void hSyncUpdateLength_01() throws IOException {
doTheJob(new HdfsConfiguration(), fName, AppendTestUtil.BLOCK_SIZE,
(short) 2, true, EnumSet.of(SyncFlag.UPDATE_LENGTH));
}
/**
* The test calls
* {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
* while requiring the semantic of {@link SyncFlag#END_BLOCK}.
*/
@Test
public void hSyncEndBlock_01() throws IOException {
doTheJob(new HdfsConfiguration(), fName, AppendTestUtil.BLOCK_SIZE,
(short) 2, true, EnumSet.of(SyncFlag.END_BLOCK));
}
/**
* The test calls
* {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
* while requiring the semantic of {@link SyncFlag#END_BLOCK} and
* {@link SyncFlag#UPDATE_LENGTH}.
*/
@Test
public void hSyncEndBlockAndUpdateLength() throws IOException {
doTheJob(new HdfsConfiguration(), fName, AppendTestUtil.BLOCK_SIZE,
(short) 2, true, EnumSet.of(SyncFlag.END_BLOCK, SyncFlag.UPDATE_LENGTH));
}
/**
* The test calls
* {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
* while requiring the semantic of {@link SyncFlag#UPDATE_LENGTH}.
   * Similar to {@link #hFlush_02()}, it writes a file with a custom block
   * size so the writes will be happening across block boundaries.
*/
@Test
public void hSyncUpdateLength_02() throws IOException {
Configuration conf = new HdfsConfiguration();
int customPerChecksumSize = 512;
int customBlockSize = customPerChecksumSize * 3;
    // Modify default filesystem settings
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
doTheJob(conf, fName, customBlockSize, (short) 2, true,
EnumSet.of(SyncFlag.UPDATE_LENGTH));
}
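  /**
   * Same configuration as {@link #hSyncUpdateLength_02()}, but syncing with
   * {@link SyncFlag#END_BLOCK}.
   */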
@Test
public void hSyncEndBlock_02() throws IOException {
Configuration conf = new HdfsConfiguration();
int customPerChecksumSize = 512;
int customBlockSize = customPerChecksumSize * 3;
    // Modify default filesystem settings
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
doTheJob(conf, fName, customBlockSize, (short) 2, true,
EnumSet.of(SyncFlag.END_BLOCK));
}
/**
* The test calls
* {@link #doTheJob(Configuration, String, long, short, boolean, EnumSet)}
* while requiring the semantic of {@link SyncFlag#UPDATE_LENGTH}.
   * Similar to {@link #hFlush_03()}, it writes a file with a custom block
   * size so the writes will be happening across block and checksum
   * boundaries.
*/
@Test
public void hSyncUpdateLength_03() throws IOException {
Configuration conf = new HdfsConfiguration();
int customPerChecksumSize = 400;
int customBlockSize = customPerChecksumSize * 3;
    // Modify default filesystem settings
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
doTheJob(conf, fName, customBlockSize, (short) 2, true,
EnumSet.of(SyncFlag.UPDATE_LENGTH));
}
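  /**
   * Same configuration as {@link #hSyncUpdateLength_03()}, but syncing with
   * {@link SyncFlag#END_BLOCK}.
   */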
@Test
public void hSyncEndBlock_03() throws IOException {
Configuration conf = new HdfsConfiguration();
int customPerChecksumSize = 400;
int customBlockSize = customPerChecksumSize * 3;
// Modify defaul filesystem settings
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
doTheJob(conf, fName, customBlockSize, (short) 2, true,
EnumSet.of(SyncFlag.END_BLOCK));
}
/**
* The method starts new cluster with defined Configuration; creates a file
* with specified block_size and writes 10 equal sections in it; it also calls
* hflush/hsync after each write and throws an IOException in case of an error.
*
* @param conf cluster configuration
   * @param fileName name of the file to be created and processed as required
   * @param block_size block size to be used for the file's creation
   * @param replicas the number of replicas
* @param isSync hsync or hflush
* @param syncFlags specify the semantic of the sync/flush
* @throws IOException in case of any errors
*/
public static void doTheJob(Configuration conf, final String fileName,
long block_size, short replicas, boolean isSync,
EnumSet<SyncFlag> syncFlags) throws IOException {
byte[] fileContent;
final int SECTIONS = 10;
fileContent = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(replicas).build();
// Make sure we work with DFS in order to utilize all its functionality
DistributedFileSystem fileSystem = cluster.getFileSystem();
FSDataInputStream is;
try {
Path path = new Path(fileName);
final String pathName = new Path(fileSystem.getWorkingDirectory(), path)
.toUri().getPath();
FSDataOutputStream stm = fileSystem.create(path, false, 4096, replicas,
block_size);
System.out.println("Created file " + fileName);
int tenth = AppendTestUtil.FILE_SIZE/SECTIONS;
int rounding = AppendTestUtil.FILE_SIZE - tenth * SECTIONS;
for (int i=0; i<SECTIONS; i++) {
System.out.println("Writing " + (tenth * i) + " to "
+ (tenth * (i + 1)) + " section to file " + fileName);
// write to the file
stm.write(fileContent, tenth * i, tenth);
        // Wait while hflush/hsync pushes all packets through the built pipeline
if (isSync) {
((DFSOutputStream)stm.getWrappedStream()).hsync(syncFlags);
} else {
((DFSOutputStream)stm.getWrappedStream()).hflush();
}
// Check file length if updatelength is required
if (isSync && syncFlags.contains(SyncFlag.UPDATE_LENGTH)) {
long currentFileLength = fileSystem.getFileStatus(path).getLen();
assertEquals(
"File size doesn't match for hsync/hflush with updating the length",
tenth * (i + 1), currentFileLength);
} else if (isSync && syncFlags.contains(SyncFlag.END_BLOCK)) {
LocatedBlocks blocks = fileSystem.dfs.getLocatedBlocks(pathName, 0);
assertEquals(i + 1, blocks.getLocatedBlocks().size());
}
byte [] toRead = new byte[tenth];
byte [] expected = new byte[tenth];
System.arraycopy(fileContent, tenth * i, expected, 0, tenth);
        // Open the same file for read. Need to create a new reader after every write operation(!)
is = fileSystem.open(path);
is.seek(tenth * i);
int readBytes = is.read(toRead, 0, tenth);
System.out.println("Has read " + readBytes);
        assertTrue("Should've got more bytes", (readBytes > 0) && (readBytes <= tenth));
is.close();
checkData(toRead, 0, readBytes, expected, "Partial verification");
}
System.out.println("Writing " + (tenth * SECTIONS) + " to " + (tenth * SECTIONS + rounding) + " section to file " + fileName);
stm.write(fileContent, tenth * SECTIONS, rounding);
stm.close();
assertEquals("File size doesn't match ", AppendTestUtil.FILE_SIZE, fileSystem.getFileStatus(path).getLen());
AppendTestUtil.checkFullFile(fileSystem, path, fileContent.length, fileContent, "hflush()");
} finally {
fileSystem.close();
cluster.shutdown();
}
}
static void checkData(final byte[] actual, int from, int len,
final byte[] expected, String message) {
for (int idx = 0; idx < len; idx++) {
assertEquals(message+" byte "+(from+idx)+" differs. expected "+
expected[from+idx]+" actual "+actual[idx],
expected[from+idx], actual[idx]);
actual[idx] = 0;
}
}
/** This creates a slow writer and check to see
* if pipeline heartbeats work fine
*/
@Test
public void testPipelineHeartbeat() throws Exception {
final int DATANODE_NUM = 2;
final int fileLen = 6;
Configuration conf = new HdfsConfiguration();
final int timeout = 2000;
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
timeout);
final Path p = new Path("/pipelineHeartbeat/foo");
System.out.println("p=" + p);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
try {
DistributedFileSystem fs = cluster.getFileSystem();
byte[] fileContents = AppendTestUtil.initBuffer(fileLen);
// create a new file.
FSDataOutputStream stm = AppendTestUtil.createFile(fs, p, DATANODE_NUM);
stm.write(fileContents, 0, 1);
Thread.sleep(timeout);
stm.hflush();
System.out.println("Wrote 1 byte and hflush " + p);
// write another byte
Thread.sleep(timeout);
stm.write(fileContents, 1, 1);
stm.hflush();
stm.write(fileContents, 2, 1);
Thread.sleep(timeout);
stm.hflush();
stm.write(fileContents, 3, 1);
Thread.sleep(timeout);
stm.write(fileContents, 4, 1);
stm.hflush();
stm.write(fileContents, 5, 1);
Thread.sleep(timeout);
stm.close();
// verify that entire file is good
AppendTestUtil.checkFullFile(fs, p, fileLen,
fileContents, "Failed to slowly write to a file");
} finally {
cluster.shutdown();
}
}
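  /**
   * Interrupt the writer thread around hflush() and close(): either the call
   * completes with the interrupt status preserved or an
   * InterruptedIOException is thrown, and the data flushed before the close
   * must remain readable afterwards.
   */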
@Test
public void testHFlushInterrupted() throws Exception {
final int DATANODE_NUM = 2;
final int fileLen = 6;
byte[] fileContents = AppendTestUtil.initBuffer(fileLen);
Configuration conf = new HdfsConfiguration();
final Path p = new Path("/hflush-interrupted");
System.out.println("p=" + p);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
try {
DistributedFileSystem fs = cluster.getFileSystem();
// create a new file.
FSDataOutputStream stm = AppendTestUtil.createFile(fs, p, DATANODE_NUM);
stm.write(fileContents, 0, 2);
Thread.currentThread().interrupt();
try {
stm.hflush();
// If we made it past the hflush(), then that means that the ack made it back
// from the pipeline before we got to the wait() call. In that case we should
// still have interrupted status.
assertTrue(Thread.interrupted());
} catch (InterruptedIOException ie) {
System.out.println("Got expected exception during flush");
}
assertFalse(Thread.interrupted());
// Try again to flush should succeed since we no longer have interrupt status
stm.hflush();
// Write some more data and flush
stm.write(fileContents, 2, 2);
stm.hflush();
// Write some data and close while interrupted
stm.write(fileContents, 4, 2);
Thread.currentThread().interrupt();
try {
stm.close();
// If we made it past the close(), then that means that the ack made it back
// from the pipeline before we got to the wait() call. In that case we should
// still have interrupted status.
assertTrue(Thread.interrupted());
} catch (InterruptedIOException ioe) {
System.out.println("Got expected exception during close");
// If we got the exception, we shouldn't have interrupted status anymore.
assertFalse(Thread.interrupted());
// Now do a successful close.
stm.close();
}
// verify that entire file is good
AppendTestUtil.checkFullFile(fs, p, 4, fileContents,
"Failed to deal with thread interruptions", false);
} finally {
cluster.shutdown();
}
}
}
| 19,210 | 37.422 | 132 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.security.protocolPB.RefreshAuthorizationPolicyProtocolClientSideTranslatorPB;
import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolClientSideTranslatorPB;
import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolClientSideTranslatorPB;
import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Test cases to verify that client side translators correctly implement the
* isMethodSupported method in ProtocolMetaInterface.
*/
public class TestIsMethodSupported {
private static MiniDFSCluster cluster = null;
private static final HdfsConfiguration conf = new HdfsConfiguration();
private static InetSocketAddress nnAddress = null;
private static InetSocketAddress dnAddress = null;
@BeforeClass
public static void setUp() throws Exception {
cluster = (new MiniDFSCluster.Builder(conf))
.numDataNodes(1).build();
nnAddress = cluster.getNameNode().getNameNodeAddress();
DataNode dn = cluster.getDataNodes().get(0);
dnAddress = new InetSocketAddress(dn.getDatanodeId().getIpAddr(),
dn.getIpcPort());
}
@AfterClass
public static void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testNamenodeProtocol() throws IOException {
NamenodeProtocol np =
NameNodeProxies.createNonHAProxy(conf,
nnAddress, NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
true).getProxy();
boolean exists = RpcClientUtil.isMethodSupported(np,
NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(NamenodeProtocolPB.class), "rollEditLog");
assertTrue(exists);
exists = RpcClientUtil.isMethodSupported(np,
NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(NamenodeProtocolPB.class), "bogusMethod");
assertFalse(exists);
}
@Test
public void testDatanodeProtocol() throws IOException {
DatanodeProtocolClientSideTranslatorPB translator =
new DatanodeProtocolClientSideTranslatorPB(nnAddress, conf);
assertTrue(translator.isMethodSupported("sendHeartbeat"));
}
@Test
public void testClientDatanodeProtocol() throws IOException {
ClientDatanodeProtocolTranslatorPB translator =
new ClientDatanodeProtocolTranslatorPB(nnAddress,
UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf));
//Namenode doesn't implement ClientDatanodeProtocol
assertFalse(translator.isMethodSupported("refreshNamenodes"));
translator = new ClientDatanodeProtocolTranslatorPB(
dnAddress, UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf));
assertTrue(translator.isMethodSupported("refreshNamenodes"));
}
@Test
public void testClientNamenodeProtocol() throws IOException {
ClientProtocol cp =
NameNodeProxies.createNonHAProxy(
conf, nnAddress, ClientProtocol.class,
UserGroupInformation.getCurrentUser(), true).getProxy();
RpcClientUtil.isMethodSupported(cp,
ClientNamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), "mkdirs");
}
@Test
public void tesJournalProtocol() throws IOException {
JournalProtocolTranslatorPB translator = (JournalProtocolTranslatorPB)
NameNodeProxies.createNonHAProxy(conf, nnAddress, JournalProtocol.class,
UserGroupInformation.getCurrentUser(), true).getProxy();
    //Namenode doesn't implement JournalProtocol
assertFalse(translator.isMethodSupported("startLogSegment"));
}
@Test
public void testInterDatanodeProtocol() throws IOException {
InterDatanodeProtocolTranslatorPB translator =
new InterDatanodeProtocolTranslatorPB(
nnAddress, UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf), 0);
//Not supported at namenode
assertFalse(translator.isMethodSupported("initReplicaRecovery"));
translator = new InterDatanodeProtocolTranslatorPB(
dnAddress, UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf), 0);
assertTrue(translator.isMethodSupported("initReplicaRecovery"));
}
@Test
public void testGetUserMappingsProtocol() throws IOException {
GetUserMappingsProtocolClientSideTranslatorPB translator =
(GetUserMappingsProtocolClientSideTranslatorPB)
NameNodeProxies.createNonHAProxy(conf, nnAddress,
GetUserMappingsProtocol.class, UserGroupInformation.getCurrentUser(),
true).getProxy();
assertTrue(translator.isMethodSupported("getGroupsForUser"));
}
@Test
public void testRefreshAuthorizationPolicyProtocol() throws IOException {
RefreshAuthorizationPolicyProtocolClientSideTranslatorPB translator =
(RefreshAuthorizationPolicyProtocolClientSideTranslatorPB)
NameNodeProxies.createNonHAProxy(conf, nnAddress,
RefreshAuthorizationPolicyProtocol.class,
UserGroupInformation.getCurrentUser(), true).getProxy();
assertTrue(translator.isMethodSupported("refreshServiceAcl"));
}
@Test
public void testRefreshUserMappingsProtocol() throws IOException {
RefreshUserMappingsProtocolClientSideTranslatorPB translator =
(RefreshUserMappingsProtocolClientSideTranslatorPB)
NameNodeProxies.createNonHAProxy(conf, nnAddress,
RefreshUserMappingsProtocol.class,
UserGroupInformation.getCurrentUser(), true).getProxy();
assertTrue(
translator.isMethodSupported("refreshUserToGroupsMappings"));
}
@Test
public void testRefreshCallQueueProtocol() throws IOException {
RefreshCallQueueProtocolClientSideTranslatorPB translator =
(RefreshCallQueueProtocolClientSideTranslatorPB)
NameNodeProxies.createNonHAProxy(conf, nnAddress,
RefreshCallQueueProtocol.class,
UserGroupInformation.getCurrentUser(), true).getProxy();
assertTrue(
translator.isMethodSupported("refreshCallQueue"));
}
}
| 8,424 | 41.984694 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.*;
import java.security.Permission;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Scanner;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.GZIPOutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Test;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
import static org.junit.Assert.*;
import static org.hamcrest.core.StringContains.containsString;
import com.google.common.collect.Lists;
/**
* This class tests commands from DFSShell.
*/
public class TestDFSShell {
private static final Log LOG = LogFactory.getLog(TestDFSShell.class);
private static final AtomicInteger counter = new AtomicInteger();
private final int SUCCESS = 0;
private final int ERROR = 1;
static final String TEST_ROOT_DIR = PathUtils.getTestDirName(TestDFSShell.class);
private static final String RAW_A1 = "raw.a1";
private static final String TRUSTED_A1 = "trusted.a1";
private static final String USER_A1 = "user.a1";
private static final byte[] RAW_A1_VALUE = new byte[]{0x32, 0x32, 0x32};
private static final byte[] TRUSTED_A1_VALUE = new byte[]{0x31, 0x31, 0x31};
private static final byte[] USER_A1_VALUE = new byte[]{0x31, 0x32, 0x33};
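  /** Write a short single-line file at {@code f} on the given file system
   *  and verify that it exists. */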
static Path writeFile(FileSystem fs, Path f) throws IOException {
DataOutputStream out = fs.create(f);
out.writeBytes("dhruba: " + f);
out.close();
assertTrue(fs.exists(f));
return f;
}
static Path mkdir(FileSystem fs, Path p) throws IOException {
assertTrue(fs.mkdirs(p));
assertTrue(fs.exists(p));
assertTrue(fs.getFileStatus(p).isDirectory());
return p;
}
static File createLocalFile(File f) throws IOException {
assertTrue(!f.exists());
PrintWriter out = new PrintWriter(f);
out.print("createLocalFile: " + f.getAbsolutePath());
out.flush();
out.close();
assertTrue(f.exists());
assertTrue(f.isFile());
return f;
}
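  /**
   * Create a local file of {@code fileLength} bytes; despite the method name,
   * the buffer is never randomized, so the file contents are all zeros.
   */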
static File createLocalFileWithRandomData(int fileLength, File f)
throws IOException {
assertTrue(!f.exists());
f.createNewFile();
FileOutputStream out = new FileOutputStream(f.toString());
byte[] buffer = new byte[fileLength];
out.write(buffer);
out.flush();
out.close();
return f;
}
static void show(String s) {
System.out.println(Thread.currentThread().getStackTrace()[2] + " " + s);
}
@Test (timeout = 30000)
public void testZeroSizeFile() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
assertTrue("Not a HDFS: "+fs.getUri(),
fs instanceof DistributedFileSystem);
final DistributedFileSystem dfs = (DistributedFileSystem)fs;
try {
//create a zero size file
final File f1 = new File(TEST_ROOT_DIR, "f1");
assertTrue(!f1.exists());
assertTrue(f1.createNewFile());
assertTrue(f1.exists());
assertTrue(f1.isFile());
assertEquals(0L, f1.length());
//copy to remote
final Path root = mkdir(dfs, new Path("/test/zeroSizeFile"));
final Path remotef = new Path(root, "dst");
show("copy local " + f1 + " to remote " + remotef);
dfs.copyFromLocalFile(false, false, new Path(f1.getPath()), remotef);
//getBlockSize() should not throw exception
show("Block size = " + dfs.getFileStatus(remotef).getBlockSize());
//copy back
final File f2 = new File(TEST_ROOT_DIR, "f2");
assertTrue(!f2.exists());
dfs.copyToLocalFile(remotef, new Path(f2.getPath()));
assertTrue(f2.exists());
assertTrue(f2.isFile());
assertEquals(0L, f2.length());
f1.delete();
f2.delete();
} finally {
try {dfs.close();} catch (Exception e) {}
cluster.shutdown();
}
}
@Test (timeout = 30000)
public void testRecursiveRm() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
assertTrue("Not a HDFS: " + fs.getUri(),
fs instanceof DistributedFileSystem);
try {
fs.mkdirs(new Path(new Path("parent"), "child"));
try {
fs.delete(new Path("parent"), false);
assert(false); // should never reach here.
} catch(IOException e) {
//should have thrown an exception
}
try {
fs.delete(new Path("parent"), true);
} catch(IOException e) {
assert(false);
}
} finally {
try { fs.close();}catch(IOException e){};
cluster.shutdown();
}
}
@Test (timeout = 30000)
public void testDu() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
DistributedFileSystem fs = cluster.getFileSystem();
PrintStream psBackup = System.out;
ByteArrayOutputStream out = new ByteArrayOutputStream();
PrintStream psOut = new PrintStream(out);
System.setOut(psOut);
FsShell shell = new FsShell();
shell.setConf(conf);
try {
Path myPath = new Path("/test/dir");
assertTrue(fs.mkdirs(myPath));
assertTrue(fs.exists(myPath));
Path myFile = new Path("/test/dir/file");
writeFile(fs, myFile);
assertTrue(fs.exists(myFile));
Path myFile2 = new Path("/test/dir/file2");
writeFile(fs, myFile2);
assertTrue(fs.exists(myFile2));
Long myFileLength = fs.getFileStatus(myFile).getLen();
Long myFile2Length = fs.getFileStatus(myFile2).getLen();
String[] args = new String[2];
args[0] = "-du";
args[1] = "/test/dir";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertTrue(val == 0);
String returnString = out.toString();
out.reset();
      // Check if the size matches as expected
assertThat(returnString, containsString(myFileLength.toString()));
assertThat(returnString, containsString(myFile2Length.toString()));
// Check that -du -s reports the state of the snapshot
String snapshotName = "ss1";
Path snapshotPath = new Path(myPath, ".snapshot/" + snapshotName);
fs.allowSnapshot(myPath);
assertThat(fs.createSnapshot(myPath, snapshotName), is(snapshotPath));
assertThat(fs.delete(myFile, false), is(true));
assertThat(fs.exists(myFile), is(false));
args = new String[3];
args[0] = "-du";
args[1] = "-s";
args[2] = snapshotPath.toString();
val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertThat(val, is(0));
returnString = out.toString();
out.reset();
Long combinedLength = myFileLength + myFile2Length;
assertThat(returnString, containsString(combinedLength.toString()));
} finally {
System.setOut(psBackup);
cluster.shutdown();
}
}
@Test (timeout = 30000)
public void testPut() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
assertTrue("Not a HDFS: "+fs.getUri(),
fs instanceof DistributedFileSystem);
final DistributedFileSystem dfs = (DistributedFileSystem)fs;
try {
// remove left over crc files:
new File(TEST_ROOT_DIR, ".f1.crc").delete();
new File(TEST_ROOT_DIR, ".f2.crc").delete();
final File f1 = createLocalFile(new File(TEST_ROOT_DIR, "f1"));
final File f2 = createLocalFile(new File(TEST_ROOT_DIR, "f2"));
final Path root = mkdir(dfs, new Path("/test/put"));
final Path dst = new Path(root, "dst");
show("begin");
final Thread copy2ndFileThread = new Thread() {
@Override
public void run() {
try {
show("copy local " + f2 + " to remote " + dst);
dfs.copyFromLocalFile(false, false, new Path(f2.getPath()), dst);
} catch (IOException ioe) {
show("good " + StringUtils.stringifyException(ioe));
return;
}
          //should not be here, must have got an IOException
assertTrue(false);
}
};
//use SecurityManager to pause the copying of f1 and begin copying f2
SecurityManager sm = System.getSecurityManager();
System.out.println("SecurityManager = " + sm);
System.setSecurityManager(new SecurityManager() {
private boolean firstTime = true;
@Override
public void checkPermission(Permission perm) {
if (firstTime) {
Thread t = Thread.currentThread();
if (!t.toString().contains("DataNode")) {
String s = "" + Arrays.asList(t.getStackTrace());
if (s.contains("FileUtil.copyContent")) {
//pause at FileUtil.copyContent
firstTime = false;
copy2ndFileThread.start();
try {Thread.sleep(5000);} catch (InterruptedException e) {}
}
}
}
}
});
show("copy local " + f1 + " to remote " + dst);
dfs.copyFromLocalFile(false, false, new Path(f1.getPath()), dst);
show("done");
try {copy2ndFileThread.join();} catch (InterruptedException e) { }
System.setSecurityManager(sm);
// copy multiple files to destination directory
final Path destmultiple = mkdir(dfs, new Path("/test/putmultiple"));
Path[] srcs = new Path[2];
srcs[0] = new Path(f1.getPath());
srcs[1] = new Path(f2.getPath());
dfs.copyFromLocalFile(false, false, srcs, destmultiple);
srcs[0] = new Path(destmultiple,"f1");
srcs[1] = new Path(destmultiple,"f2");
assertTrue(dfs.exists(srcs[0]));
assertTrue(dfs.exists(srcs[1]));
// move multiple files to destination directory
final Path destmultiple2 = mkdir(dfs, new Path("/test/movemultiple"));
srcs[0] = new Path(f1.getPath());
srcs[1] = new Path(f2.getPath());
dfs.moveFromLocalFile(srcs, destmultiple2);
assertFalse(f1.exists());
assertFalse(f2.exists());
srcs[0] = new Path(destmultiple2, "f1");
srcs[1] = new Path(destmultiple2, "f2");
assertTrue(dfs.exists(srcs[0]));
assertTrue(dfs.exists(srcs[1]));
f1.delete();
f2.delete();
} finally {
try {dfs.close();} catch (Exception e) {}
cluster.shutdown();
}
}
/** check command error outputs and exit statuses. */
@Test (timeout = 30000)
public void testErrOutPut() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
PrintStream bak = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem srcFs = cluster.getFileSystem();
Path root = new Path("/nonexistentfile");
bak = System.err;
ByteArrayOutputStream out = new ByteArrayOutputStream();
PrintStream tmp = new PrintStream(out);
System.setErr(tmp);
String[] argv = new String[2];
argv[0] = "-cat";
argv[1] = root.toUri().getPath();
int ret = ToolRunner.run(new FsShell(), argv);
assertEquals(" -cat returned 1 ", 1, ret);
String returned = out.toString();
assertTrue("cat does not print exceptions ",
(returned.lastIndexOf("Exception") == -1));
out.reset();
argv[0] = "-rm";
argv[1] = root.toString();
FsShell shell = new FsShell();
shell.setConf(conf);
ret = ToolRunner.run(shell, argv);
assertEquals(" -rm returned 1 ", 1, ret);
returned = out.toString();
out.reset();
assertTrue("rm prints reasonable error ",
(returned.lastIndexOf("No such file or directory") != -1));
argv[0] = "-rmr";
argv[1] = root.toString();
ret = ToolRunner.run(shell, argv);
assertEquals(" -rmr returned 1", 1, ret);
returned = out.toString();
assertTrue("rmr prints reasonable error ",
(returned.lastIndexOf("No such file or directory") != -1));
out.reset();
argv[0] = "-du";
argv[1] = "/nonexistentfile";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertTrue(" -du prints reasonable error ",
(returned.lastIndexOf("No such file or directory") != -1));
out.reset();
argv[0] = "-dus";
argv[1] = "/nonexistentfile";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertTrue(" -dus prints reasonable error",
(returned.lastIndexOf("No such file or directory") != -1));
out.reset();
argv[0] = "-ls";
argv[1] = "/nonexistenfile";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertTrue(" -ls does not return Found 0 items",
(returned.lastIndexOf("Found 0") == -1));
out.reset();
argv[0] = "-ls";
argv[1] = "/nonexistentfile";
ret = ToolRunner.run(shell, argv);
assertEquals(" -lsr should fail ", 1, ret);
out.reset();
srcFs.mkdirs(new Path("/testdir"));
argv[0] = "-ls";
argv[1] = "/testdir";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertTrue(" -ls does not print out anything ",
(returned.lastIndexOf("Found 0") == -1));
out.reset();
argv[0] = "-ls";
argv[1] = "/user/nonxistant/*";
ret = ToolRunner.run(shell, argv);
assertEquals(" -ls on nonexistent glob returns 1", 1, ret);
out.reset();
argv[0] = "-mkdir";
argv[1] = "/testdir";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertEquals(" -mkdir returned 1 ", 1, ret);
assertTrue(" -mkdir returned File exists",
(returned.lastIndexOf("File exists") != -1));
Path testFile = new Path("/testfile");
OutputStream outtmp = srcFs.create(testFile);
outtmp.write(testFile.toString().getBytes());
outtmp.close();
out.reset();
argv[0] = "-mkdir";
argv[1] = "/testfile";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertEquals(" -mkdir returned 1", 1, ret);
assertTrue(" -mkdir returned this is a file ",
(returned.lastIndexOf("not a directory") != -1));
out.reset();
argv = new String[3];
argv[0] = "-mv";
argv[1] = "/testfile";
argv[2] = "file";
ret = ToolRunner.run(shell, argv);
assertEquals("mv failed to rename", 1, ret);
out.reset();
argv = new String[3];
argv[0] = "-mv";
argv[1] = "/testfile";
argv[2] = "/testfiletest";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertTrue("no output from rename",
(returned.lastIndexOf("Renamed") == -1));
out.reset();
argv[0] = "-mv";
argv[1] = "/testfile";
argv[2] = "/testfiletmp";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertTrue(" unix like output",
(returned.lastIndexOf("No such file or") != -1));
out.reset();
argv = new String[1];
argv[0] = "-du";
srcFs.mkdirs(srcFs.getHomeDirectory());
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertEquals(" no error ", 0, ret);
assertTrue("empty path specified",
(returned.lastIndexOf("empty string") == -1));
out.reset();
argv = new String[3];
argv[0] = "-test";
argv[1] = "-d";
argv[2] = "/no/such/dir";
ret = ToolRunner.run(shell, argv);
returned = out.toString();
assertEquals(" -test -d wrong result ", 1, ret);
assertTrue(returned.isEmpty());
} finally {
if (bak != null) {
System.setErr(bak);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test (timeout = 30000)
public void testURIPaths() throws Exception {
Configuration srcConf = new HdfsConfiguration();
Configuration dstConf = new HdfsConfiguration();
MiniDFSCluster srcCluster = null;
MiniDFSCluster dstCluster = null;
File bak = new File(PathUtils.getTestDir(getClass()), "dfs_tmp_uri");
bak.mkdirs();
try{
srcCluster = new MiniDFSCluster.Builder(srcConf).numDataNodes(2).build();
dstConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, bak.getAbsolutePath());
dstCluster = new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build();
FileSystem srcFs = srcCluster.getFileSystem();
FileSystem dstFs = dstCluster.getFileSystem();
FsShell shell = new FsShell();
shell.setConf(srcConf);
//check for ls
String[] argv = new String[2];
argv[0] = "-ls";
argv[1] = dstFs.getUri().toString() + "/";
int ret = ToolRunner.run(shell, argv);
assertEquals("ls works on remote uri ", 0, ret);
//check for rm -r
dstFs.mkdirs(new Path("/hadoopdir"));
argv = new String[2];
argv[0] = "-rmr";
argv[1] = dstFs.getUri().toString() + "/hadoopdir";
ret = ToolRunner.run(shell, argv);
assertEquals("-rmr works on remote uri " + argv[1], 0, ret);
//check du
argv[0] = "-du";
argv[1] = dstFs.getUri().toString() + "/";
ret = ToolRunner.run(shell, argv);
assertEquals("du works on remote uri ", 0, ret);
//check put
File furi = new File(TEST_ROOT_DIR, "furi");
createLocalFile(furi);
argv = new String[3];
argv[0] = "-put";
argv[1] = furi.toURI().toString();
argv[2] = dstFs.getUri().toString() + "/furi";
ret = ToolRunner.run(shell, argv);
assertEquals(" put is working ", 0, ret);
//check cp
argv[0] = "-cp";
argv[1] = dstFs.getUri().toString() + "/furi";
argv[2] = srcFs.getUri().toString() + "/furi";
ret = ToolRunner.run(shell, argv);
assertEquals(" cp is working ", 0, ret);
assertTrue(srcFs.exists(new Path("/furi")));
//check cat
argv = new String[2];
argv[0] = "-cat";
argv[1] = dstFs.getUri().toString() + "/furi";
ret = ToolRunner.run(shell, argv);
assertEquals(" cat is working ", 0, ret);
//check chown
dstFs.delete(new Path("/furi"), true);
dstFs.delete(new Path("/hadoopdir"), true);
String file = "/tmp/chownTest";
Path path = new Path(file);
Path parent = new Path("/tmp");
Path root = new Path("/");
TestDFSShell.writeFile(dstFs, path);
runCmd(shell, "-chgrp", "-R", "herbivores", dstFs.getUri().toString() +"/*");
confirmOwner(null, "herbivores", dstFs, parent, path);
runCmd(shell, "-chown", "-R", ":reptiles", dstFs.getUri().toString() + "/");
confirmOwner(null, "reptiles", dstFs, root, parent, path);
//check if default hdfs:/// works
argv[0] = "-cat";
argv[1] = "hdfs:///furi";
ret = ToolRunner.run(shell, argv);
assertEquals(" default works for cat", 0, ret);
argv[0] = "-ls";
argv[1] = "hdfs:///";
ret = ToolRunner.run(shell, argv);
assertEquals("default works for ls ", 0, ret);
argv[0] = "-rmr";
argv[1] = "hdfs:///furi";
ret = ToolRunner.run(shell, argv);
assertEquals("default works for rm/rmr", 0, ret);
} finally {
if (null != srcCluster) {
srcCluster.shutdown();
}
if (null != dstCluster) {
dstCluster.shutdown();
}
}
}
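  /**
   * Test the -text command against compressed files on both HDFS and the
   * local file system.
   */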
@Test (timeout = 30000)
public void testText() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem dfs = cluster.getFileSystem();
textTest(new Path("/texttest").makeQualified(dfs.getUri(),
dfs.getWorkingDirectory()), conf);
conf.set("fs.defaultFS", dfs.getUri().toString());
final FileSystem lfs = FileSystem.getLocal(conf);
textTest(new Path(TEST_ROOT_DIR, "texttest").makeQualified(lfs.getUri(),
lfs.getWorkingDirectory()), conf);
} finally {
if (null != cluster) {
cluster.shutdown();
}
}
}
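  // Exercises -text on a gzip file (magic detection), a SequenceFile named
  // with a .gz extension (container detection), a .deflate file and a bzip2
  // file (extension detection), verifying the decoded output matches the
  // original data.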
private void textTest(Path root, Configuration conf) throws Exception {
PrintStream bak = null;
try {
final FileSystem fs = root.getFileSystem(conf);
fs.mkdirs(root);
// Test the gzip type of files. Magic detection.
OutputStream zout = new GZIPOutputStream(
fs.create(new Path(root, "file.gz")));
Random r = new Random();
bak = System.out;
ByteArrayOutputStream file = new ByteArrayOutputStream();
for (int i = 0; i < 1024; ++i) {
char c = Character.forDigit(r.nextInt(26) + 10, 36);
file.write(c);
zout.write(c);
}
zout.close();
ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setOut(new PrintStream(out));
String[] argv = new String[2];
argv[0] = "-text";
argv[1] = new Path(root, "file.gz").toString();
int ret = ToolRunner.run(new FsShell(conf), argv);
assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
assertTrue("Output doesn't match input",
Arrays.equals(file.toByteArray(), out.toByteArray()));
// Create a sequence file with a gz extension, to test proper
// container detection. Magic detection.
SequenceFile.Writer writer = SequenceFile.createWriter(
conf,
SequenceFile.Writer.file(new Path(root, "file.gz")),
SequenceFile.Writer.keyClass(Text.class),
SequenceFile.Writer.valueClass(Text.class));
writer.append(new Text("Foo"), new Text("Bar"));
writer.close();
out = new ByteArrayOutputStream();
System.setOut(new PrintStream(out));
argv = new String[2];
argv[0] = "-text";
argv[1] = new Path(root, "file.gz").toString();
ret = ToolRunner.run(new FsShell(conf), argv);
assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
assertTrue("Output doesn't match input",
Arrays.equals("Foo\tBar\n".getBytes(), out.toByteArray()));
out.reset();
// Test deflate. Extension-based detection.
OutputStream dout = new DeflaterOutputStream(
fs.create(new Path(root, "file.deflate")));
byte[] outbytes = "foo".getBytes();
dout.write(outbytes);
dout.close();
out = new ByteArrayOutputStream();
System.setOut(new PrintStream(out));
argv = new String[2];
argv[0] = "-text";
argv[1] = new Path(root, "file.deflate").toString();
ret = ToolRunner.run(new FsShell(conf), argv);
assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
assertTrue("Output doesn't match input",
Arrays.equals(outbytes, out.toByteArray()));
out.reset();
// Test a simple codec. Extension based detection. We use
// Bzip2 cause its non-native.
CompressionCodec codec = ReflectionUtils.newInstance(BZip2Codec.class, conf);
String extension = codec.getDefaultExtension();
Path p = new Path(root, "file." + extension);
OutputStream fout = new DataOutputStream(codec.createOutputStream(
fs.create(p, true)));
byte[] writebytes = "foo".getBytes();
fout.write(writebytes);
fout.close();
out = new ByteArrayOutputStream();
System.setOut(new PrintStream(out));
argv = new String[2];
argv[0] = "-text";
argv[1] = new Path(root, p).toString();
ret = ToolRunner.run(new FsShell(conf), argv);
assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
assertTrue("Output doesn't match input",
Arrays.equals(writebytes, out.toByteArray()));
out.reset();
} finally {
if (null != bak) {
System.setOut(bak);
}
}
}
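  /**
   * Test -copyToLocal: copies a directory tree to the local file system and
   * verifies that a nonexistent source does not create a zero-byte
   * destination file.
   */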
@Test (timeout = 30000)
public void testCopyToLocal() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
assertTrue("Not a HDFS: "+fs.getUri(),
fs instanceof DistributedFileSystem);
DistributedFileSystem dfs = (DistributedFileSystem)fs;
FsShell shell = new FsShell();
shell.setConf(conf);
try {
String root = createTree(dfs, "copyToLocal");
// Verify copying the tree
{
try {
assertEquals(0,
runCmd(shell, "-copyToLocal", root + "*", TEST_ROOT_DIR));
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
File localroot = new File(TEST_ROOT_DIR, "copyToLocal");
File localroot2 = new File(TEST_ROOT_DIR, "copyToLocal2");
File f1 = new File(localroot, "f1");
assertTrue("Copying failed.", f1.isFile());
File f2 = new File(localroot, "f2");
assertTrue("Copying failed.", f2.isFile());
File sub = new File(localroot, "sub");
assertTrue("Copying failed.", sub.isDirectory());
File f3 = new File(sub, "f3");
assertTrue("Copying failed.", f3.isFile());
File f4 = new File(sub, "f4");
assertTrue("Copying failed.", f4.isFile());
File f5 = new File(localroot2, "f1");
assertTrue("Copying failed.", f5.isFile());
f1.delete();
f2.delete();
f3.delete();
f4.delete();
f5.delete();
sub.delete();
}
// Verify copying non existing sources do not create zero byte
// destination files
{
String[] args = {"-copyToLocal", "nosuchfile", TEST_ROOT_DIR};
try {
assertEquals(1, shell.run(args));
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
File f6 = new File(TEST_ROOT_DIR, "nosuchfile");
assertTrue(!f6.exists());
}
} finally {
try {
dfs.close();
} catch (Exception e) {
}
cluster.shutdown();
}
}
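  // Creates the directory tree sketched below, used by the copyToLocal,
  // count and lsr tests; returns the path of its root.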
static String createTree(FileSystem fs, String name) throws IOException {
// create a tree
// ROOT
// |- f1
// |- f2
// + sub
// |- f3
// |- f4
// ROOT2
// |- f1
String path = "/test/" + name;
Path root = mkdir(fs, new Path(path));
Path sub = mkdir(fs, new Path(root, "sub"));
Path root2 = mkdir(fs, new Path(path + "2"));
writeFile(fs, new Path(root, "f1"));
writeFile(fs, new Path(root, "f2"));
writeFile(fs, new Path(sub, "f3"));
writeFile(fs, new Path(sub, "f4"));
writeFile(fs, new Path(root2, "f1"));
mkdir(fs, new Path(root2, "sub"));
return path;
}
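  /**
   * Test -count on the tree built by createTree, on a local directory, and
   * on a mix of HDFS and local arguments.
   */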
@Test (timeout = 30000)
public void testCount() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
DistributedFileSystem dfs = cluster.getFileSystem();
FsShell shell = new FsShell();
shell.setConf(conf);
try {
String root = createTree(dfs, "count");
// Verify the counts
runCount(root, 2, 4, shell);
runCount(root + "2", 2, 1, shell);
runCount(root + "2/f1", 0, 1, shell);
runCount(root + "2/sub", 1, 0, shell);
final FileSystem localfs = FileSystem.getLocal(conf);
Path localpath = new Path(TEST_ROOT_DIR, "testcount");
localpath = localpath.makeQualified(localfs.getUri(),
localfs.getWorkingDirectory());
localfs.mkdirs(localpath);
final String localstr = localpath.toString();
System.out.println("localstr=" + localstr);
runCount(localstr, 1, 0, shell);
assertEquals(0, runCmd(shell, "-count", root, localstr));
} finally {
try {
dfs.close();
} catch (Exception e) {
}
cluster.shutdown();
}
}
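  /**
   * Verify that FileSystem#getUsed() returns the total length of all files
   * in the file system.
   */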
@Test(timeout = 30000)
public void testTotalSizeOfAllFiles() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
FileSystem fs = cluster.getFileSystem();
// create file under root
      FSDataOutputStream file1 = fs.create(new Path("/File1"));
      file1.write("hi".getBytes());
      file1.close();
      // create file under sub-folder
      FSDataOutputStream file2 = fs.create(new Path("/Folder1/File2"));
      file2.write("hi".getBytes());
      file2.close();
// getUsed() should return total length of all the files in Filesystem
assertEquals(4, fs.getUsed());
} finally {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
}
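  // Runs -count on the given path and asserts the reported directory and
  // file counts.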
private static void runCount(String path, long dirs, long files, FsShell shell
) throws IOException {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
PrintStream out = new PrintStream(bytes);
PrintStream oldOut = System.out;
System.setOut(out);
Scanner in = null;
String results = null;
try {
runCmd(shell, "-count", path);
results = bytes.toString();
in = new Scanner(results);
assertEquals(dirs, in.nextLong());
assertEquals(files, in.nextLong());
} finally {
if (in!=null) in.close();
IOUtils.closeStream(out);
System.setOut(oldOut);
System.out.println("results:\n" + results);
}
}
//throws IOException instead of Exception as shell.run() does.
private static int runCmd(FsShell shell, String... args) throws IOException {
StringBuilder cmdline = new StringBuilder("RUN:");
for (String arg : args) cmdline.append(" " + arg);
LOG.info(cmdline.toString());
try {
int exitCode;
exitCode = shell.run(args);
LOG.info("RUN: "+args[0]+" exit=" + exitCode);
return exitCode;
} catch (IOException e) {
LOG.error("RUN: "+args[0]+" IOException="+e.getMessage());
throw e;
} catch (RuntimeException e) {
LOG.error("RUN: "+args[0]+" RuntimeException="+e.getMessage());
throw e;
} catch (Exception e) {
LOG.error("RUN: "+args[0]+" Exception="+e.getMessage());
throw new IOException(StringUtils.stringifyException(e));
}
}
/**
* Test chmod.
*/
void testChmod(Configuration conf, FileSystem fs, String chmodDir)
throws IOException {
FsShell shell = new FsShell();
shell.setConf(conf);
try {
//first make dir
Path dir = new Path(chmodDir);
fs.delete(dir, true);
fs.mkdirs(dir);
confirmPermissionChange(/* Setting */ "u+rwx,g=rw,o-rwx",
/* Should give */ "rwxrw----", fs, shell, dir);
//create an empty file
Path file = new Path(chmodDir, "file");
TestDFSShell.writeFile(fs, file);
//test octal mode
confirmPermissionChange("644", "rw-r--r--", fs, shell, file);
//test recursive
runCmd(shell, "-chmod", "-R", "a+rwX", chmodDir);
assertEquals("rwxrwxrwx",
fs.getFileStatus(dir).getPermission().toString());
assertEquals("rw-rw-rw-",
fs.getFileStatus(file).getPermission().toString());
// Skip "sticky bit" tests on Windows.
//
if (!Path.WINDOWS) {
// test sticky bit on directories
Path dir2 = new Path(dir, "stickybit");
fs.mkdirs(dir2);
LOG.info("Testing sticky bit on: " + dir2);
LOG.info("Sticky bit directory initial mode: " +
fs.getFileStatus(dir2).getPermission());
confirmPermissionChange("u=rwx,g=rx,o=rx", "rwxr-xr-x", fs, shell, dir2);
confirmPermissionChange("+t", "rwxr-xr-t", fs, shell, dir2);
confirmPermissionChange("-t", "rwxr-xr-x", fs, shell, dir2);
confirmPermissionChange("=t", "--------T", fs, shell, dir2);
confirmPermissionChange("0000", "---------", fs, shell, dir2);
confirmPermissionChange("1666", "rw-rw-rwT", fs, shell, dir2);
confirmPermissionChange("777", "rwxrwxrwt", fs, shell, dir2);
fs.delete(dir2, true);
} else {
LOG.info("Skipped sticky bit tests on Windows");
}
fs.delete(dir, true);
} finally {
try {
fs.close();
shell.close();
} catch (IOException ignored) {}
}
}
// Apply a new permission to a path and confirm that the new permission
// is the one you were expecting
private void confirmPermissionChange(String toApply, String expected,
FileSystem fs, FsShell shell, Path dir2) throws IOException {
LOG.info("Confirming permission change of " + toApply + " to " + expected);
runCmd(shell, "-chmod", toApply, dir2.toString());
String result = fs.getFileStatus(dir2).getPermission().toString();
LOG.info("Permission change result: " + result);
assertEquals(expected, result);
}
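  // Asserts the owner and/or group of each of the given paths; a null owner
  // or group is not checked.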
private void confirmOwner(String owner, String group,
FileSystem fs, Path... paths) throws IOException {
for(Path path : paths) {
if (owner != null) {
assertEquals(owner, fs.getFileStatus(path).getOwner());
}
if (group != null) {
assertEquals(group, fs.getFileStatus(path).getGroup());
}
}
}
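  /**
   * Test chmod on the local file system and on DFS, then chown/chgrp on DFS,
   * including user and group names containing dots, dashes, '@' and '/'.
   */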
@Test (timeout = 30000)
public void testFilePermissions() throws IOException {
Configuration conf = new HdfsConfiguration();
//test chmod on local fs
FileSystem fs = FileSystem.getLocal(conf);
testChmod(conf, fs,
(new File(TEST_ROOT_DIR, "chmodTest")).getAbsolutePath());
conf.set(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, "true");
//test chmod on DFS
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
fs = cluster.getFileSystem();
testChmod(conf, fs, "/tmp/chmodTest");
// test chown and chgrp on DFS:
FsShell shell = new FsShell();
shell.setConf(conf);
fs = cluster.getFileSystem();
/* For dfs, I am the super user and I can change owner of any file to
* anything. "-R" option is already tested by chmod test above.
*/
String file = "/tmp/chownTest";
Path path = new Path(file);
Path parent = new Path("/tmp");
Path root = new Path("/");
TestDFSShell.writeFile(fs, path);
runCmd(shell, "-chgrp", "-R", "herbivores", "/*", "unknownFile*");
confirmOwner(null, "herbivores", fs, parent, path);
runCmd(shell, "-chgrp", "mammals", file);
confirmOwner(null, "mammals", fs, path);
runCmd(shell, "-chown", "-R", ":reptiles", "/");
confirmOwner(null, "reptiles", fs, root, parent, path);
runCmd(shell, "-chown", "python:", "/nonExistentFile", file);
confirmOwner("python", "reptiles", fs, path);
runCmd(shell, "-chown", "-R", "hadoop:toys", "unknownFile", "/");
confirmOwner("hadoop", "toys", fs, root, parent, path);
// Test different characters in names
runCmd(shell, "-chown", "hdfs.user", file);
confirmOwner("hdfs.user", null, fs, path);
runCmd(shell, "-chown", "_Hdfs.User-10:_hadoop.users--", file);
confirmOwner("_Hdfs.User-10", "_hadoop.users--", fs, path);
runCmd(shell, "-chown", "hdfs/[email protected]:asf-projects", file);
confirmOwner("hdfs/[email protected]", "asf-projects", fs, path);
runCmd(shell, "-chgrp", "[email protected]/100", file);
confirmOwner(null, "[email protected]/100", fs, path);
cluster.shutdown();
}
/**
* Tests various options of DFSShell.
*/
@Test (timeout = 120000)
public void testDFSShell() throws IOException {
Configuration conf = new HdfsConfiguration();
/* This tests some properties of ChecksumFileSystem as well.
* Make sure that we create ChecksumDFS */
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
assertTrue("Not a HDFS: "+fs.getUri(),
fs instanceof DistributedFileSystem);
DistributedFileSystem fileSys = (DistributedFileSystem)fs;
FsShell shell = new FsShell();
shell.setConf(conf);
try {
// First create a new directory with mkdirs
Path myPath = new Path("/test/mkdirs");
assertTrue(fileSys.mkdirs(myPath));
assertTrue(fileSys.exists(myPath));
assertTrue(fileSys.mkdirs(myPath));
// Second, create a file in that directory.
Path myFile = new Path("/test/mkdirs/myFile");
writeFile(fileSys, myFile);
assertTrue(fileSys.exists(myFile));
Path myFile2 = new Path("/test/mkdirs/myFile2");
writeFile(fileSys, myFile2);
assertTrue(fileSys.exists(myFile2));
// Verify that rm with a pattern
{
String[] args = new String[2];
args[0] = "-rm";
args[1] = "/test/mkdirs/myFile*";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertTrue(val == 0);
assertFalse(fileSys.exists(myFile));
assertFalse(fileSys.exists(myFile2));
//re-create the files for other tests
writeFile(fileSys, myFile);
assertTrue(fileSys.exists(myFile));
writeFile(fileSys, myFile2);
assertTrue(fileSys.exists(myFile2));
}
// Verify that we can read the file
{
String[] args = new String[3];
args[0] = "-cat";
args[1] = "/test/mkdirs/myFile";
args[2] = "/test/mkdirs/myFile2";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run: " +
StringUtils.stringifyException(e));
}
assertTrue(val == 0);
}
fileSys.delete(myFile2, true);
// Verify that we get an error while trying to read an nonexistent file
{
String[] args = new String[2];
args[0] = "-cat";
args[1] = "/test/mkdirs/myFile1";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertTrue(val != 0);
}
// Verify that we get an error while trying to delete an nonexistent file
{
String[] args = new String[2];
args[0] = "-rm";
args[1] = "/test/mkdirs/myFile1";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertTrue(val != 0);
}
// Verify that we succeed in removing the file we created
{
String[] args = new String[2];
args[0] = "-rm";
args[1] = "/test/mkdirs/myFile";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertTrue(val == 0);
}
// Verify touch/test
{
String[] args;
int val;
args = new String[3];
args[0] = "-test";
args[1] = "-e";
args[2] = "/test/mkdirs/noFileHere";
val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
args[1] = "-z";
val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
args = new String[2];
args[0] = "-touchz";
args[1] = "/test/mkdirs/isFileHere";
val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(0, val);
args = new String[2];
args[0] = "-touchz";
args[1] = "/test/mkdirs/thisDirNotExists/isFileHere";
val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
args = new String[3];
args[0] = "-test";
args[1] = "-e";
args[2] = "/test/mkdirs/isFileHere";
val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(0, val);
args[1] = "-d";
val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
args[1] = "-z";
val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(0, val);
}
// Verify that cp from a directory to a subdirectory fails
{
String[] args = new String[2];
args[0] = "-mkdir";
args[1] = "/test/dir1";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(0, val);
// this should fail
String[] args1 = new String[3];
args1[0] = "-cp";
args1[1] = "/test/dir1";
args1[2] = "/test/dir1/dir2";
val = 0;
try {
val = shell.run(args1);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
// this should succeed
args1[0] = "-cp";
args1[1] = "/test/dir1";
args1[2] = "/test/dir1foo";
val = -1;
try {
val = shell.run(args1);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(0, val);
}
// Verify -test -f negative case (missing file)
{
String[] args = new String[3];
args[0] = "-test";
args[1] = "-f";
args[2] = "/test/mkdirs/noFileHere";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
}
// Verify -test -f negative case (directory rather than file)
{
String[] args = new String[3];
args[0] = "-test";
args[1] = "-f";
args[2] = "/test/mkdirs";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
}
// Verify -test -f positive case
{
writeFile(fileSys, myFile);
assertTrue(fileSys.exists(myFile));
String[] args = new String[3];
args[0] = "-test";
args[1] = "-f";
args[2] = myFile.toString();
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(0, val);
}
// Verify -test -s negative case (missing file)
{
String[] args = new String[3];
args[0] = "-test";
args[1] = "-s";
args[2] = "/test/mkdirs/noFileHere";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
}
// Verify -test -s negative case (zero length file)
{
String[] args = new String[3];
args[0] = "-test";
args[1] = "-s";
args[2] = "/test/mkdirs/isFileHere";
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(1, val);
}
// Verify -test -s positive case (nonzero length file)
{
String[] args = new String[3];
args[0] = "-test";
args[1] = "-s";
args[2] = myFile.toString();
int val = -1;
try {
val = shell.run(args);
} catch (Exception e) {
System.err.println("Exception raised from DFSShell.run " +
e.getLocalizedMessage());
}
assertEquals(0, val);
}
} finally {
try {
fileSys.close();
} catch (Exception e) {
}
cluster.shutdown();
}
}
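  // Collects the on-disk block files for every block reported by each
  // datanode in the cluster.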
static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
List<File> files = new ArrayList<File>();
List<DataNode> datanodes = cluster.getDataNodes();
String poolId = cluster.getNamesystem().getBlockPoolId();
List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
for(int i = 0; i < blocks.size(); i++) {
DataNode dn = datanodes.get(i);
Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
for(Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
for(Block b : e.getValue()) {
files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
}
}
}
return files;
}
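  // Corrupts each of the given block files by incrementing its first
  // character.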
static void corrupt(List<File> files) throws IOException {
for(File f : files) {
StringBuilder content = new StringBuilder(DFSTestUtil.readFile(f));
char c = content.charAt(0);
content.setCharAt(0, ++c);
PrintWriter out = new PrintWriter(f);
out.print(content);
out.flush();
out.close();
}
}
static interface TestGetRunner {
String run(int exitcode, String... options) throws IOException;
}
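  /**
   * Verify that a permission-denied error from -ls, run as a non-privileged
   * user, results in exit code 1 and a "Permission denied" message.
   */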
@Test (timeout = 30000)
public void testRemoteException() throws Exception {
UserGroupInformation tmpUGI =
UserGroupInformation.createUserForTesting("tmpname", new String[] {"mygroup"});
MiniDFSCluster dfs = null;
PrintStream bak = null;
try {
final Configuration conf = new HdfsConfiguration();
dfs = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = dfs.getFileSystem();
Path p = new Path("/foo");
fs.mkdirs(p);
fs.setPermission(p, new FsPermission((short)0700));
bak = System.err;
tmpUGI.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
FsShell fshell = new FsShell(conf);
ByteArrayOutputStream out = new ByteArrayOutputStream();
PrintStream tmp = new PrintStream(out);
System.setErr(tmp);
String[] args = new String[2];
args[0] = "-ls";
args[1] = "/foo";
int ret = ToolRunner.run(fshell, args);
assertEquals("returned should be 1", 1, ret);
String str = out.toString();
assertTrue("permission denied printed",
str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
});
} finally {
if (bak != null) {
System.setErr(bak);
}
if (dfs != null) {
dfs.shutdown();
}
}
}
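  /**
   * Test -get, with and without -ignoreCrc, both before and after the block
   * files have been corrupted on disk.
   */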
@Test (timeout = 30000)
public void testGet() throws IOException {
GenericTestUtils.setLogLevel(FSInputChecker.LOG, Level.ALL);
final String fname = "testGet.txt";
Path root = new Path("/test/get");
final Path remotef = new Path(root, fname);
final Configuration conf = new HdfsConfiguration();
// Set short retry timeouts so this test runs faster
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
TestGetRunner runner = new TestGetRunner() {
private int count = 0;
private final FsShell shell = new FsShell(conf);
public String run(int exitcode, String... options) throws IOException {
String dst = new File(TEST_ROOT_DIR, fname + ++count)
.getAbsolutePath();
String[] args = new String[options.length + 3];
args[0] = "-get";
args[args.length - 2] = remotef.toString();
args[args.length - 1] = dst;
for(int i = 0; i < options.length; i++) {
args[i + 1] = options[i];
}
show("args=" + Arrays.asList(args));
try {
assertEquals(exitcode, shell.run(args));
} catch (Exception e) {
assertTrue(StringUtils.stringifyException(e), false);
}
return exitcode == 0? DFSTestUtil.readFile(new File(dst)): null;
}
};
File localf = createLocalFile(new File(TEST_ROOT_DIR, fname));
MiniDFSCluster cluster = null;
DistributedFileSystem dfs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true)
.build();
dfs = cluster.getFileSystem();
mkdir(dfs, root);
dfs.copyFromLocalFile(false, false, new Path(localf.getPath()), remotef);
String localfcontent = DFSTestUtil.readFile(localf);
assertEquals(localfcontent, runner.run(0));
assertEquals(localfcontent, runner.run(0, "-ignoreCrc"));
// find block files to modify later
List<File> files = getBlockFiles(cluster);
// Shut down cluster and then corrupt the block files by overwriting a
// portion with junk data. We must shut down the cluster so that threads
// in the data node do not hold locks on the block files while we try to
// write into them. Particularly on Windows, the data node's use of the
// FileChannel.transferTo method can cause block files to be memory mapped
// in read-only mode during the transfer to a client, and this causes a
// locking conflict. The call to shutdown the cluster blocks until all
// DataXceiver threads exit, preventing this problem.
dfs.close();
cluster.shutdown();
show("files=" + files);
corrupt(files);
// Start the cluster again, but do not reformat, so prior files remain.
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(false)
.build();
dfs = cluster.getFileSystem();
assertEquals(null, runner.run(1));
String corruptedcontent = runner.run(0, "-ignoreCrc");
assertEquals(localfcontent.substring(1), corruptedcontent.substring(1));
assertEquals(localfcontent.charAt(0)+1, corruptedcontent.charAt(0));
} finally {
if (null != dfs) {
try {
dfs.close();
} catch (Exception e) {
}
}
if (null != cluster) {
cluster.shutdown();
}
localf.delete();
}
}
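  /**
   * Test -lsr (recursive listing), including the partial listing produced
   * when a subdirectory is not readable by the current user.
   */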
@Test (timeout = 30000)
public void testLsr() throws Exception {
final Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
DistributedFileSystem dfs = cluster.getFileSystem();
try {
final String root = createTree(dfs, "lsr");
dfs.mkdirs(new Path(root, "zzz"));
runLsr(new FsShell(conf), root, 0);
final Path sub = new Path(root, "sub");
dfs.setPermission(sub, new FsPermission((short)0));
final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
final String tmpusername = ugi.getShortUserName() + "1";
UserGroupInformation tmpUGI = UserGroupInformation.createUserForTesting(
tmpusername, new String[] {tmpusername});
String results = tmpUGI.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws Exception {
return runLsr(new FsShell(conf), root, 1);
}
});
assertTrue(results.contains("zzz"));
} finally {
cluster.shutdown();
}
}
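  // Runs -lsr on the given root, asserts the expected exit code and returns
  // the captured output.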
private static String runLsr(final FsShell shell, String root, int returnvalue
) throws Exception {
System.out.println("root=" + root + ", returnvalue=" + returnvalue);
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(bytes);
final PrintStream oldOut = System.out;
final PrintStream oldErr = System.err;
System.setOut(out);
System.setErr(out);
final String results;
try {
assertEquals(returnvalue, shell.run(new String[]{"-lsr", root}));
results = bytes.toString();
} finally {
IOUtils.closeStream(out);
System.setOut(oldOut);
System.setErr(oldErr);
}
System.out.println("results:\n" + results);
return results;
}
/**
* default setting is file:// which is not a DFS
* so DFSAdmin should throw and catch InvalidArgumentException
* and return -1 exit code.
* @throws Exception
*/
@Test (timeout = 30000)
public void testInvalidShell() throws Exception {
Configuration conf = new Configuration(); // default FS (non-DFS)
DFSAdmin admin = new DFSAdmin();
admin.setConf(conf);
int res = admin.run(new String[] {"-refreshNodes"});
assertEquals("expected to fail -1", res , -1);
}
// Preserve Copy Option is -ptopxa (timestamps, ownership, permission, XATTR,
// ACLs)
@Test (timeout = 120000)
public void testCopyCommandsWithPreserveOption() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(true).build();
FsShell shell = null;
FileSystem fs = null;
final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithPreserveOption-"
+ counter.getAndIncrement();
final Path hdfsTestDir = new Path(testdir);
try {
fs = cluster.getFileSystem();
fs.mkdirs(hdfsTestDir);
Path src = new Path(hdfsTestDir, "srcfile");
fs.create(src).close();
fs.setAcl(src, Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
aclEntry(ACCESS, OTHER, EXECUTE)));
FileStatus status = fs.getFileStatus(src);
final long mtime = status.getModificationTime();
final long atime = status.getAccessTime();
final String owner = status.getOwner();
final String group = status.getGroup();
final FsPermission perm = status.getPermission();
fs.setXAttr(src, USER_A1, USER_A1_VALUE);
fs.setXAttr(src, TRUSTED_A1, TRUSTED_A1_VALUE);
shell = new FsShell(conf);
// -p
Path target1 = new Path(hdfsTestDir, "targetfile1");
String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
target1.toUri().toString() };
int ret = ToolRunner.run(shell, argv);
assertEquals("cp -p is not working", SUCCESS, ret);
FileStatus targetStatus = fs.getFileStatus(target1);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
assertEquals(owner, targetStatus.getOwner());
assertEquals(group, targetStatus.getGroup());
FsPermission targetPerm = targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
Map<String, byte[]> xattrs = fs.getXAttrs(target1);
assertTrue(xattrs.isEmpty());
List<AclEntry> acls = fs.getAclStatus(target1).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptop
Path target2 = new Path(hdfsTestDir, "targetfile2");
argv = new String[] { "-cp", "-ptop", src.toUri().toString(),
target2.toUri().toString() };
ret = ToolRunner.run(shell, argv);
assertEquals("cp -ptop is not working", SUCCESS, ret);
targetStatus = fs.getFileStatus(target2);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
assertEquals(owner, targetStatus.getOwner());
assertEquals(group, targetStatus.getGroup());
targetPerm = targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs = fs.getXAttrs(target2);
assertTrue(xattrs.isEmpty());
acls = fs.getAclStatus(target2).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptopx
Path target3 = new Path(hdfsTestDir, "targetfile3");
argv = new String[] { "-cp", "-ptopx", src.toUri().toString(),
target3.toUri().toString() };
ret = ToolRunner.run(shell, argv);
assertEquals("cp -ptopx is not working", SUCCESS, ret);
targetStatus = fs.getFileStatus(target3);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
assertEquals(owner, targetStatus.getOwner());
assertEquals(group, targetStatus.getGroup());
targetPerm = targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs = fs.getXAttrs(target3);
      assertEquals(2, xattrs.size());
assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1));
assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
acls = fs.getAclStatus(target3).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptopa
Path target4 = new Path(hdfsTestDir, "targetfile4");
argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
target4.toUri().toString() };
ret = ToolRunner.run(shell, argv);
assertEquals("cp -ptopa is not working", SUCCESS, ret);
targetStatus = fs.getFileStatus(target4);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
assertEquals(owner, targetStatus.getOwner());
assertEquals(group, targetStatus.getGroup());
targetPerm = targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs = fs.getXAttrs(target4);
assertTrue(xattrs.isEmpty());
acls = fs.getAclStatus(target4).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(src), fs.getAclStatus(target4));
// -ptoa (verify -pa option will preserve permissions also)
Path target5 = new Path(hdfsTestDir, "targetfile5");
argv = new String[] { "-cp", "-ptoa", src.toUri().toString(),
target5.toUri().toString() };
ret = ToolRunner.run(shell, argv);
assertEquals("cp -ptoa is not working", SUCCESS, ret);
targetStatus = fs.getFileStatus(target5);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
assertEquals(owner, targetStatus.getOwner());
assertEquals(group, targetStatus.getGroup());
targetPerm = targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs = fs.getXAttrs(target5);
assertTrue(xattrs.isEmpty());
acls = fs.getAclStatus(target5).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(src), fs.getAclStatus(target5));
} finally {
if (null != shell) {
shell.close();
}
if (null != fs) {
fs.delete(hdfsTestDir, true);
fs.close();
}
cluster.shutdown();
}
}
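  // Verify that raw.* xattrs are preserved by cp only when both the source
  // and the destination are under /.reserved/raw, for files, directories and
  // relative paths.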
@Test (timeout = 120000)
public void testCopyCommandsWithRawXAttrs() throws Exception {
final Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
numDataNodes(1).format(true).build();
FsShell shell = null;
FileSystem fs = null;
final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithRawXAttrs-"
+ counter.getAndIncrement();
final Path hdfsTestDir = new Path(testdir);
final Path rawHdfsTestDir = new Path("/.reserved/raw" + testdir);
try {
fs = cluster.getFileSystem();
fs.mkdirs(hdfsTestDir);
final Path src = new Path(hdfsTestDir, "srcfile");
final String rawSrcBase = "/.reserved/raw" + testdir;
final Path rawSrc = new Path(rawSrcBase, "srcfile");
fs.create(src).close();
final Path srcDir = new Path(hdfsTestDir, "srcdir");
final Path rawSrcDir = new Path("/.reserved/raw" + testdir, "srcdir");
fs.mkdirs(srcDir);
final Path srcDirFile = new Path(srcDir, "srcfile");
final Path rawSrcDirFile =
new Path("/.reserved/raw" + srcDirFile);
fs.create(srcDirFile).close();
final Path[] paths = { rawSrc, rawSrcDir, rawSrcDirFile };
final String[] xattrNames = { USER_A1, RAW_A1 };
final byte[][] xattrVals = { USER_A1_VALUE, RAW_A1_VALUE };
for (int i = 0; i < paths.length; i++) {
for (int j = 0; j < xattrNames.length; j++) {
fs.setXAttr(paths[i], xattrNames[j], xattrVals[j]);
}
}
shell = new FsShell(conf);
/* Check that a file as the source path works ok. */
doTestCopyCommandsWithRawXAttrs(shell, fs, src, hdfsTestDir, false);
doTestCopyCommandsWithRawXAttrs(shell, fs, rawSrc, hdfsTestDir, false);
doTestCopyCommandsWithRawXAttrs(shell, fs, src, rawHdfsTestDir, false);
doTestCopyCommandsWithRawXAttrs(shell, fs, rawSrc, rawHdfsTestDir, true);
/* Use a relative /.reserved/raw path. */
final Path savedWd = fs.getWorkingDirectory();
try {
fs.setWorkingDirectory(new Path(rawSrcBase));
final Path relRawSrc = new Path("../srcfile");
final Path relRawHdfsTestDir = new Path("..");
doTestCopyCommandsWithRawXAttrs(shell, fs, relRawSrc, relRawHdfsTestDir,
true);
} finally {
fs.setWorkingDirectory(savedWd);
}
/* Check that a directory as the source path works ok. */
doTestCopyCommandsWithRawXAttrs(shell, fs, srcDir, hdfsTestDir, false);
doTestCopyCommandsWithRawXAttrs(shell, fs, rawSrcDir, hdfsTestDir, false);
doTestCopyCommandsWithRawXAttrs(shell, fs, srcDir, rawHdfsTestDir, false);
doTestCopyCommandsWithRawXAttrs(shell, fs, rawSrcDir, rawHdfsTestDir,
true);
/* Use relative in an absolute path. */
final String relRawSrcDir = "./.reserved/../.reserved/raw/../raw" +
testdir + "/srcdir";
final String relRawDstDir = "./.reserved/../.reserved/raw/../raw" +
testdir;
doTestCopyCommandsWithRawXAttrs(shell, fs, new Path(relRawSrcDir),
new Path(relRawDstDir), true);
} finally {
if (null != shell) {
shell.close();
}
if (null != fs) {
fs.delete(hdfsTestDir, true);
fs.close();
}
cluster.shutdown();
}
}
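  // Copies src into hdfsTestDir with -p, -px and with no options; expects an
  // error when exactly one of source and destination is under /.reserved/raw,
  // otherwise checks which xattrs were preserved.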
private void doTestCopyCommandsWithRawXAttrs(FsShell shell, FileSystem fs,
Path src, Path hdfsTestDir, boolean expectRaw) throws Exception {
Path target;
boolean srcIsRaw;
if (src.isAbsolute()) {
srcIsRaw = src.toString().contains("/.reserved/raw");
} else {
srcIsRaw = new Path(fs.getWorkingDirectory(), src).
toString().contains("/.reserved/raw");
}
final boolean destIsRaw = hdfsTestDir.toString().contains("/.reserved/raw");
final boolean srcDestMismatch = srcIsRaw ^ destIsRaw;
    // -p (possibly preserve raw if src & dst are both /.reserved/raw)
if (srcDestMismatch) {
doCopyAndTest(shell, hdfsTestDir, src, "-p", ERROR);
} else {
target = doCopyAndTest(shell, hdfsTestDir, src, "-p", SUCCESS);
checkXAttrs(fs, target, expectRaw, false);
}
    // -px (possibly preserve raw, always preserve non-raw xattrs)
if (srcDestMismatch) {
doCopyAndTest(shell, hdfsTestDir, src, "-px", ERROR);
} else {
target = doCopyAndTest(shell, hdfsTestDir, src, "-px", SUCCESS);
checkXAttrs(fs, target, expectRaw, true);
}
    // no args (possibly preserve raw, never preserve non-raw xattrs)
if (srcDestMismatch) {
doCopyAndTest(shell, hdfsTestDir, src, null, ERROR);
} else {
target = doCopyAndTest(shell, hdfsTestDir, src, null, SUCCESS);
checkXAttrs(fs, target, expectRaw, false);
}
}
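  // Runs cp with the given option (if any), asserts the expected exit code
  // and returns the target path.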
private Path doCopyAndTest(FsShell shell, Path dest, Path src,
String cpArgs, int expectedExitCode) throws Exception {
final Path target = new Path(dest, "targetfile" +
counter.getAndIncrement());
final String[] argv = cpArgs == null ?
new String[] { "-cp", src.toUri().toString(),
target.toUri().toString() } :
new String[] { "-cp", cpArgs, src.toUri().toString(),
target.toUri().toString() };
final int ret = ToolRunner.run(shell, argv);
assertEquals("cp -p is not working", expectedExitCode, ret);
return target;
}
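  // Asserts that the target carries exactly the expected raw and/or user
  // xattrs.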
private void checkXAttrs(FileSystem fs, Path target, boolean expectRaw,
boolean expectVanillaXAttrs) throws Exception {
final Map<String, byte[]> xattrs = fs.getXAttrs(target);
int expectedCount = 0;
if (expectRaw) {
assertArrayEquals("raw.a1 has incorrect value",
RAW_A1_VALUE, xattrs.get(RAW_A1));
expectedCount++;
}
if (expectVanillaXAttrs) {
assertArrayEquals("user.a1 has incorrect value",
USER_A1_VALUE, xattrs.get(USER_A1));
expectedCount++;
}
assertEquals("xattrs size mismatch", expectedCount, xattrs.size());
}
// verify cp -ptopxa option will preserve directory attributes.
@Test (timeout = 120000)
public void testCopyCommandsToDirectoryWithPreserveOption()
throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(true).build();
FsShell shell = null;
FileSystem fs = null;
final String testdir =
"/tmp/TestDFSShell-testCopyCommandsToDirectoryWithPreserveOption-"
+ counter.getAndIncrement();
final Path hdfsTestDir = new Path(testdir);
try {
fs = cluster.getFileSystem();
fs.mkdirs(hdfsTestDir);
Path srcDir = new Path(hdfsTestDir, "srcDir");
fs.mkdirs(srcDir);
fs.setAcl(srcDir, Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, GROUP, "bar", READ_EXECUTE),
aclEntry(ACCESS, OTHER, EXECUTE)));
// set sticky bit
fs.setPermission(srcDir,
new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
// Create a file in srcDir to check if modification time of
// srcDir to be preserved after copying the file.
// If cp -p command is to preserve modification time and then copy child
// (srcFile), modification time will not be preserved.
Path srcFile = new Path(srcDir, "srcFile");
fs.create(srcFile).close();
FileStatus status = fs.getFileStatus(srcDir);
final long mtime = status.getModificationTime();
final long atime = status.getAccessTime();
final String owner = status.getOwner();
final String group = status.getGroup();
final FsPermission perm = status.getPermission();
fs.setXAttr(srcDir, USER_A1, USER_A1_VALUE);
fs.setXAttr(srcDir, TRUSTED_A1, TRUSTED_A1_VALUE);
shell = new FsShell(conf);
// -p
Path targetDir1 = new Path(hdfsTestDir, "targetDir1");
String[] argv = new String[] { "-cp", "-p", srcDir.toUri().toString(),
targetDir1.toUri().toString() };
int ret = ToolRunner.run(shell, argv);
assertEquals("cp -p is not working", SUCCESS, ret);
FileStatus targetStatus = fs.getFileStatus(targetDir1);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
assertEquals(owner, targetStatus.getOwner());
assertEquals(group, targetStatus.getGroup());
FsPermission targetPerm = targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
Map<String, byte[]> xattrs = fs.getXAttrs(targetDir1);
assertTrue(xattrs.isEmpty());
List<AclEntry> acls = fs.getAclStatus(targetDir1).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptop
Path targetDir2 = new Path(hdfsTestDir, "targetDir2");
argv = new String[] { "-cp", "-ptop", srcDir.toUri().toString(),
targetDir2.toUri().toString() };
ret = ToolRunner.run(shell, argv);
assertEquals("cp -ptop is not working", SUCCESS, ret);
targetStatus = fs.getFileStatus(targetDir2);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
assertEquals(owner, targetStatus.getOwner());
assertEquals(group, targetStatus.getGroup());
targetPerm = targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs = fs.getXAttrs(targetDir2);
assertTrue(xattrs.isEmpty());
acls = fs.getAclStatus(targetDir2).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptopx
Path targetDir3 = new Path(hdfsTestDir, "targetDir3");
argv = new String[] { "-cp", "-ptopx", srcDir.toUri().toString(),
targetDir3.toUri().toString() };
ret = ToolRunner.run(shell, argv);
assertEquals("cp -ptopx is not working", SUCCESS, ret);
targetStatus = fs.getFileStatus(targetDir3);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
assertEquals(owner, targetStatus.getOwner());
assertEquals(group, targetStatus.getGroup());
targetPerm = targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs = fs.getXAttrs(targetDir3);
      assertEquals(2, xattrs.size());
assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1));
assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
acls = fs.getAclStatus(targetDir3).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptopa
Path targetDir4 = new Path(hdfsTestDir, "targetDir4");
argv = new String[] { "-cp", "-ptopa", srcDir.toUri().toString(),
targetDir4.toUri().toString() };
ret = ToolRunner.run(shell, argv);
assertEquals("cp -ptopa is not working", SUCCESS, ret);
targetStatus = fs.getFileStatus(targetDir4);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
assertEquals(owner, targetStatus.getOwner());
assertEquals(group, targetStatus.getGroup());
targetPerm = targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs = fs.getXAttrs(targetDir4);
assertTrue(xattrs.isEmpty());
acls = fs.getAclStatus(targetDir4).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(srcDir), fs.getAclStatus(targetDir4));
// -ptoa (verify -pa option will preserve permissions also)
Path targetDir5 = new Path(hdfsTestDir, "targetDir5");
argv = new String[] { "-cp", "-ptoa", srcDir.toUri().toString(),
targetDir5.toUri().toString() };
ret = ToolRunner.run(shell, argv);
assertEquals("cp -ptoa is not working", SUCCESS, ret);
targetStatus = fs.getFileStatus(targetDir5);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
assertEquals(owner, targetStatus.getOwner());
assertEquals(group, targetStatus.getGroup());
targetPerm = targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs = fs.getXAttrs(targetDir5);
assertTrue(xattrs.isEmpty());
acls = fs.getAclStatus(targetDir5).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(srcDir), fs.getAclStatus(targetDir5));
} finally {
if (shell != null) {
shell.close();
}
if (fs != null) {
fs.delete(hdfsTestDir, true);
fs.close();
}
cluster.shutdown();
}
}
// Verify cp -pa option will preserve both ACL and sticky bit.
@Test (timeout = 120000)
public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(true).build();
FsShell shell = null;
FileSystem fs = null;
final String testdir =
"/tmp/TestDFSShell-testCopyCommandsPreserveAclAndStickyBit-"
+ counter.getAndIncrement();
final Path hdfsTestDir = new Path(testdir);
try {
fs = cluster.getFileSystem();
fs.mkdirs(hdfsTestDir);
Path src = new Path(hdfsTestDir, "srcfile");
fs.create(src).close();
fs.setAcl(src, Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
aclEntry(ACCESS, OTHER, EXECUTE)));
// set sticky bit
fs.setPermission(src,
new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
FileStatus status = fs.getFileStatus(src);
final long mtime = status.getModificationTime();
final long atime = status.getAccessTime();
final String owner = status.getOwner();
final String group = status.getGroup();
final FsPermission perm = status.getPermission();
shell = new FsShell(conf);
// -p preserves sticky bit and doesn't preserve ACL
Path target1 = new Path(hdfsTestDir, "targetfile1");
String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
target1.toUri().toString() };
int ret = ToolRunner.run(shell, argv);
assertEquals("cp is not working", SUCCESS, ret);
FileStatus targetStatus = fs.getFileStatus(target1);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
assertEquals(owner, targetStatus.getOwner());
assertEquals(group, targetStatus.getGroup());
FsPermission targetPerm = targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
List<AclEntry> acls = fs.getAclStatus(target1).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptopa preserves both sticky bit and ACL
Path target2 = new Path(hdfsTestDir, "targetfile2");
argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
target2.toUri().toString() };
ret = ToolRunner.run(shell, argv);
assertEquals("cp -ptopa is not working", SUCCESS, ret);
targetStatus = fs.getFileStatus(target2);
assertEquals(mtime, targetStatus.getModificationTime());
assertEquals(atime, targetStatus.getAccessTime());
assertEquals(owner, targetStatus.getOwner());
assertEquals(group, targetStatus.getGroup());
targetPerm = targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
acls = fs.getAclStatus(target2).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(src), fs.getAclStatus(target2));
} finally {
if (null != shell) {
shell.close();
}
if (null != fs) {
fs.delete(hdfsTestDir, true);
fs.close();
}
cluster.shutdown();
}
}
// force Copy Option is -f
@Test (timeout = 30000)
public void testCopyCommandsWithForceOption() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(true).build();
FsShell shell = null;
FileSystem fs = null;
final File localFile = new File(TEST_ROOT_DIR, "testFileForPut");
final String localfilepath = new Path(localFile.getAbsolutePath()).toUri().toString();
final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithForceOption-"
+ counter.getAndIncrement();
final Path hdfsTestDir = new Path(testdir);
try {
fs = cluster.getFileSystem();
fs.mkdirs(hdfsTestDir);
localFile.createNewFile();
writeFile(fs, new Path(testdir, "testFileForPut"));
shell = new FsShell();
// Tests for put
String[] argv = new String[] { "-put", "-f", localfilepath, testdir };
int res = ToolRunner.run(shell, argv);
assertEquals("put -f is not working", SUCCESS, res);
argv = new String[] { "-put", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
assertEquals("put command itself is able to overwrite the file", ERROR,
res);
// Tests for copyFromLocal
argv = new String[] { "-copyFromLocal", "-f", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
assertEquals("copyFromLocal -f is not working", SUCCESS, res);
argv = new String[] { "-copyFromLocal", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
assertEquals(
"copyFromLocal command itself is able to overwrite the file", ERROR,
res);
// Tests for cp
argv = new String[] { "-cp", "-f", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
assertEquals("cp -f is not working", SUCCESS, res);
argv = new String[] { "-cp", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
assertEquals("cp command itself is able to overwrite the file", ERROR,
res);
} finally {
if (null != shell)
shell.close();
if (localFile.exists())
localFile.delete();
if (null != fs) {
fs.delete(hdfsTestDir, true);
fs.close();
}
cluster.shutdown();
}
}
// setrep for file and directory.
@Test (timeout = 30000)
public void testSetrep() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(true).build();
FsShell shell = null;
FileSystem fs = null;
final String testdir1 = "/tmp/TestDFSShell-testSetrep-" + counter.getAndIncrement();
final String testdir2 = testdir1 + "/nestedDir";
final Path hdfsFile1 = new Path(testdir1, "testFileForSetrep");
final Path hdfsFile2 = new Path(testdir2, "testFileForSetrep");
final Short oldRepFactor = new Short((short) 1);
final Short newRepFactor = new Short((short) 3);
try {
String[] argv;
cluster.waitActive();
fs = cluster.getFileSystem();
assertThat(fs.mkdirs(new Path(testdir2)), is(true));
shell = new FsShell(conf);
fs.create(hdfsFile1, true).close();
fs.create(hdfsFile2, true).close();
// Tests for setrep on a file.
argv = new String[] { "-setrep", newRepFactor.toString(), hdfsFile1.toString() };
assertThat(shell.run(argv), is(SUCCESS));
assertThat(fs.getFileStatus(hdfsFile1).getReplication(), is(newRepFactor));
assertThat(fs.getFileStatus(hdfsFile2).getReplication(), is(oldRepFactor));
// Tests for setrep
// Tests for setrep on a directory and make sure it is applied recursively.
argv = new String[] { "-setrep", newRepFactor.toString(), testdir1 };
assertThat(shell.run(argv), is(SUCCESS));
assertThat(fs.getFileStatus(hdfsFile1).getReplication(), is(newRepFactor));
assertThat(fs.getFileStatus(hdfsFile2).getReplication(), is(newRepFactor));
} finally {
if (shell != null) {
shell.close();
}
cluster.shutdown();
}
}
/**
* Delete a file optionally configuring trash on the server and client.
*/
private void deleteFileUsingTrash(
boolean serverTrash, boolean clientTrash) throws Exception {
// Run a cluster, optionally with trash enabled on the server
Configuration serverConf = new HdfsConfiguration();
if (serverTrash) {
serverConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf)
.numDataNodes(1).format(true).build();
Configuration clientConf = new Configuration(serverConf);
// Create a client, optionally with trash enabled
if (clientTrash) {
clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
} else {
clientConf.setLong(FS_TRASH_INTERVAL_KEY, 0);
}
FsShell shell = new FsShell(clientConf);
FileSystem fs = null;
try {
// Create and delete a file
fs = cluster.getFileSystem();
// Use a separate tmp dir for each invocation.
final String testdir = "/tmp/TestDFSShell-deleteFileUsingTrash-" +
counter.getAndIncrement();
writeFile(fs, new Path(testdir, "foo"));
final String testFile = testdir + "/foo";
final String trashFile = shell.getCurrentTrashDir() + "/" + testFile;
String[] argv = new String[] { "-rm", testFile };
int res = ToolRunner.run(shell, argv);
assertEquals("rm failed", 0, res);
if (serverTrash) {
// If the server config was set we should use it unconditionally
assertTrue("File not in trash", fs.exists(new Path(trashFile)));
} else if (clientTrash) {
// If the server config was not set but the client config was
// set then we should use it
assertTrue("File not in trashed", fs.exists(new Path(trashFile)));
} else {
// If neither was set then we should not have trashed the file
assertFalse("File was not removed", fs.exists(new Path(testFile)));
assertFalse("File was trashed", fs.exists(new Path(trashFile)));
}
} finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
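  /**
   * Test -appendToFile: appending two local files creates the target at
   * their combined length, and a second run doubles it.
   */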
@Test (timeout = 300000)
public void testAppendToFile() throws Exception {
final int inputFileLength = 1024 * 1024;
File testRoot = new File(TEST_ROOT_DIR, "testAppendtoFileDir");
testRoot.mkdirs();
File file1 = new File(testRoot, "file1");
File file2 = new File(testRoot, "file2");
createLocalFileWithRandomData(inputFileLength, file1);
createLocalFileWithRandomData(inputFileLength, file2);
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
try {
FileSystem dfs = cluster.getFileSystem();
assertTrue("Not a HDFS: " + dfs.getUri(),
dfs instanceof DistributedFileSystem);
// Run appendToFile once, make sure that the target file is
// created and is of the right size.
Path remoteFile = new Path("/remoteFile");
FsShell shell = new FsShell();
shell.setConf(conf);
String[] argv = new String[] {
"-appendToFile", file1.toString(), file2.toString(), remoteFile.toString() };
int res = ToolRunner.run(shell, argv);
assertThat(res, is(0));
assertThat(dfs.getFileStatus(remoteFile).getLen(), is((long) inputFileLength * 2));
// Run the command once again and make sure that the target file
// size has been doubled.
res = ToolRunner.run(shell, argv);
assertThat(res, is(0));
assertThat(dfs.getFileStatus(remoteFile).getLen(), is((long) inputFileLength * 4));
} finally {
cluster.shutdown();
}
}
@Test (timeout = 300000)
public void testAppendToFileBadArgs() throws Exception {
final int inputFileLength = 1024 * 1024;
File testRoot = new File(TEST_ROOT_DIR, "testAppendToFileBadArgsDir");
testRoot.mkdirs();
File file1 = new File(testRoot, "file1");
createLocalFileWithRandomData(inputFileLength, file1);
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
try {
FileSystem dfs = cluster.getFileSystem();
assertTrue("Not a HDFS: " + dfs.getUri(),
dfs instanceof DistributedFileSystem);
// Run appendToFile with insufficient arguments.
FsShell shell = new FsShell();
shell.setConf(conf);
String[] argv = new String[] {
"-appendToFile", file1.toString() };
int res = ToolRunner.run(shell, argv);
assertThat(res, not(0));
// Mix stdin with other input files. Must fail.
Path remoteFile = new Path("/remoteFile");
argv = new String[] {
"-appendToFile", file1.toString(), "-", remoteFile.toString() };
res = ToolRunner.run(shell, argv);
assertThat(res, not(0));
} finally {
cluster.shutdown();
}
}
@Test (timeout = 30000)
public void testSetXAttrPermission() throws Exception {
UserGroupInformation user = UserGroupInformation.
createUserForTesting("user", new String[] {"mygroup"});
MiniDFSCluster cluster = null;
PrintStream bak = null;
try {
final Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
Path p = new Path("/foo");
fs.mkdirs(p);
bak = System.err;
final FsShell fshell = new FsShell(conf);
final ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setErr(new PrintStream(out));
// No permission to write xattr
fs.setPermission(p, new FsPermission((short) 0700));
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
int ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
assertEquals("Returned should be 1", 1, ret);
String str = out.toString();
assertTrue("Permission denied printed",
str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
});
int ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
assertEquals("Returned should be 0", 0, ret);
out.reset();
// No permission to read and remove
fs.setPermission(p, new FsPermission((short) 0750));
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
// Read
int ret = ToolRunner.run(fshell, new String[]{
"-getfattr", "-n", "user.a1", "/foo"});
assertEquals("Returned should be 1", 1, ret);
String str = out.toString();
assertTrue("Permission denied printed",
str.indexOf("Permission denied") != -1);
out.reset();
// Remove
ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-x", "user.a1", "/foo"});
assertEquals("Returned should be 1", 1, ret);
str = out.toString();
assertTrue("Permission denied printed",
str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
});
} finally {
if (bak != null) {
System.setErr(bak);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
/* HDFS-6413 xattr names erroneously handled as case-insensitive */
@Test (timeout = 30000)
public void testSetXAttrCaseSensitivity() throws Exception {
MiniDFSCluster cluster = null;
PrintStream bak = null;
try {
final Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
Path p = new Path("/mydir");
fs.mkdirs(p);
bak = System.err;
final FsShell fshell = new FsShell(conf);
final ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setOut(new PrintStream(out));
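      // The sequence below relies on xattr names being case-sensitive after
      // the namespace prefix: the "user." prefix is treated case-insensitively,
      // but "Foo", "FOO" and "foo" are expected to remain distinct keys
      // (HDFS-6413).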
doSetXattr(out, fshell,
new String[] {"-setfattr", "-n", "User.Foo", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo"},
new String[] {});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-n", "user.FOO", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo", "user.FOO"},
new String[] {});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-n", "USER.foo", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo", "user.FOO", "user.foo"},
new String[] {});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-n", "USER.fOo", "-v", "myval", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo", "user.FOO", "user.foo", "user.fOo=\"myval\""},
new String[] {"user.Foo=", "user.FOO=", "user.foo="});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-x", "useR.foo", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo", "user.FOO"},
new String[] {"foo"});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-x", "USER.FOO", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo"},
new String[] {"FOO"});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-x", "useR.Foo", "/mydir"},
new String[] {"-getfattr", "-n", "User.Foo", "/mydir"},
new String[] {},
new String[] {"Foo"});
} finally {
if (bak != null) {
System.setOut(bak);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
private void doSetXattr(ByteArrayOutputStream out, FsShell fshell,
String[] setOp, String[] getOp, String[] expectArr,
String[] dontExpectArr) throws Exception {
int ret = ToolRunner.run(fshell, setOp);
out.reset();
ret = ToolRunner.run(fshell, getOp);
final String str = out.toString();
for (int i = 0; i < expectArr.length; i++) {
final String expect = expectArr[i];
final StringBuilder sb = new StringBuilder
("Incorrect results from getfattr. Expected: ");
sb.append(expect).append(" Full Result: ");
sb.append(str);
assertTrue(sb.toString(),
str.indexOf(expect) != -1);
}
for (int i = 0; i < dontExpectArr.length; i++) {
String dontExpect = dontExpectArr[i];
final StringBuilder sb = new StringBuilder
("Incorrect results from getfattr. Didn't Expect: ");
sb.append(dontExpect).append(" Full Result: ");
sb.append(str);
assertTrue(sb.toString(),
str.indexOf(dontExpect) == -1);
}
out.reset();
}
/**
*
   * Test to make sure that user namespace xattrs can be set only if the user
   * has path access, and that for sticky directories only the owner or a
   * privileged user can write.
* Trusted namespace xattrs can be set only with privileged users.
*
* As user1: Create a directory (/foo) as user1, chown it to user1 (and
* user1's group), grant rwx to "other".
*
* As user2: Set an xattr (should pass with path access).
*
* As user1: Set an xattr (should pass).
*
* As user2: Read the xattr (should pass). Remove the xattr (should pass with
* path access).
*
* As user1: Read the xattr (should pass). Remove the xattr (should pass).
*
* As user1: Change permissions only to owner
*
* As User2: Set an Xattr (Should fail set with no path access) Remove an
* Xattr (Should fail with no path access)
*
* As SuperUser: Set an Xattr with Trusted (Should pass)
*/
@Test (timeout = 30000)
public void testSetXAttrPermissionAsDifferentOwner() throws Exception {
final String USER1 = "user1";
final String GROUP1 = "supergroup";
final UserGroupInformation user1 = UserGroupInformation.
createUserForTesting(USER1, new String[] {GROUP1});
final UserGroupInformation user2 = UserGroupInformation.
createUserForTesting("user2", new String[] {"mygroup2"});
final UserGroupInformation SUPERUSER = UserGroupInformation.getCurrentUser();
MiniDFSCluster cluster = null;
PrintStream bak = null;
try {
final Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final FileSystem fs = cluster.getFileSystem();
fs.setOwner(new Path("/"), USER1, GROUP1);
bak = System.err;
final FsShell fshell = new FsShell(conf);
final ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setErr(new PrintStream(out));
//Test 1. Let user1 be owner for /foo
user1.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final int ret = ToolRunner.run(fshell, new String[]{
"-mkdir", "/foo"});
assertEquals("Return should be 0", 0, ret);
out.reset();
return null;
}
});
//Test 2. Give access to others
user1.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
// Give access to "other"
final int ret = ToolRunner.run(fshell, new String[]{
"-chmod", "707", "/foo"});
assertEquals("Return should be 0", 0, ret);
out.reset();
return null;
}
});
// Test 3. Should be allowed to write xattr if there is a path access to
// user (user2).
user2.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final int ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
assertEquals("Returned should be 0", 0, ret);
out.reset();
return null;
}
});
//Test 4. There should be permission to write xattr for
// the owning user with write permissions.
user1.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final int ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
assertEquals("Returned should be 0", 0, ret);
out.reset();
return null;
}
});
      // Test 5. A non-owning user (user2) with path access should be able to
      // read the xattr and also to remove it.
user2.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
// Read
int ret = ToolRunner.run(fshell, new String[] { "-getfattr", "-n",
"user.a1", "/foo" });
assertEquals("Returned should be 0", 0, ret);
out.reset();
// Remove
ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-x",
"user.a1", "/foo" });
assertEquals("Returned should be 0", 0, ret);
out.reset();
return null;
}
});
// Test 6. There should be permission to read/remove for
// the owning user with path access.
user1.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
return null;
}
});
// Test 7. Change permission to have path access only to owner(user1)
user1.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
// Give access to "other"
final int ret = ToolRunner.run(fshell, new String[]{
"-chmod", "700", "/foo"});
assertEquals("Return should be 0", 0, ret);
out.reset();
return null;
}
});
// Test 8. There should be no permissions to set for
// the non-owning user with no path access.
user2.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
// set
int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-n",
"user.a2", "/foo" });
assertEquals("Returned should be 1", 1, ret);
final String str = out.toString();
assertTrue("Permission denied printed",
str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
});
// Test 9. There should be no permissions to remove for
// the non-owning user with no path access.
user2.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
          // remove
int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-x",
"user.a2", "/foo" });
assertEquals("Returned should be 1", 1, ret);
final String str = out.toString();
assertTrue("Permission denied printed",
str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
});
// Test 10. Superuser should be allowed to set with trusted namespace
SUPERUSER.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
// set
int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-n",
"trusted.a3", "/foo" });
assertEquals("Returned should be 0", 0, ret);
out.reset();
return null;
}
});
} finally {
if (bak != null) {
System.setErr(bak);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
/*
* 1. Test that CLI throws an exception and returns non-0 when user does
* not have permission to read an xattr.
* 2. Test that CLI throws an exception and returns non-0 when a non-existent
* xattr is requested.
*/
@Test (timeout = 120000)
public void testGetFAttrErrors() throws Exception {
final UserGroupInformation user = UserGroupInformation.
createUserForTesting("user", new String[] {"mygroup"});
MiniDFSCluster cluster = null;
PrintStream bakErr = null;
try {
final Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final FileSystem fs = cluster.getFileSystem();
final Path p = new Path("/foo");
fs.mkdirs(p);
bakErr = System.err;
final FsShell fshell = new FsShell(conf);
final ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setErr(new PrintStream(out));
// No permission for "other".
fs.setPermission(p, new FsPermission((short) 0700));
{
final int ret = ToolRunner.run(fshell, new String[] {
"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
assertEquals("Returned should be 0", 0, ret);
out.reset();
}
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
int ret = ToolRunner.run(fshell, new String[] {
"-getfattr", "-n", "user.a1", "/foo"});
String str = out.toString();
assertTrue("xattr value was incorrectly returned",
str.indexOf("1234") == -1);
out.reset();
return null;
}
});
{
final int ret = ToolRunner.run(fshell, new String[]{
"-getfattr", "-n", "user.nonexistent", "/foo"});
String str = out.toString();
assertTrue("xattr value was incorrectly returned",
str.indexOf(
"getfattr: At least one of the attributes provided was not found")
>= 0);
out.reset();
}
} finally {
if (bakErr != null) {
System.setErr(bakErr);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test that the server trash configuration is respected when
* the client configuration is not set.
*/
@Test (timeout = 30000)
public void testServerConfigRespected() throws Exception {
deleteFileUsingTrash(true, false);
}
/**
* Test that server trash configuration is respected even when the
* client configuration is set.
*/
@Test (timeout = 30000)
public void testServerConfigRespectedWithClient() throws Exception {
deleteFileUsingTrash(true, true);
}
/**
* Test that the client trash configuration is respected when
* the server configuration is not set.
*/
@Test (timeout = 30000)
public void testClientConfigRespected() throws Exception {
deleteFileUsingTrash(false, true);
}
/**
* Test that trash is disabled by default.
*/
@Test (timeout = 30000)
public void testNoTrashConfig() throws Exception {
deleteFileUsingTrash(false, false);
}
}
| 106,584 | 35.020615 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.DataOutputStream;
import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DataStreamer.LastExceptionInStreamer;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.when;
public class TestDFSOutputStream {
static MiniDFSCluster cluster;
@BeforeClass
public static void setup() throws IOException {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).build();
}
/**
* The close() method of DFSOutputStream should never throw the same exception
* twice. See HDFS-5335 for details.
*/
@Test
public void testCloseTwice() throws IOException {
DistributedFileSystem fs = cluster.getFileSystem();
FSDataOutputStream os = fs.create(new Path("/test"));
DFSOutputStream dos = (DFSOutputStream) Whitebox.getInternalState(os,
"wrappedStream");
DataStreamer streamer = (DataStreamer) Whitebox
.getInternalState(dos, "streamer");
@SuppressWarnings("unchecked")
LastExceptionInStreamer ex = (LastExceptionInStreamer) Whitebox
.getInternalState(streamer, "lastException");
Throwable thrown = (Throwable) Whitebox.getInternalState(ex, "thrown");
Assert.assertNull(thrown);
dos.close();
IOException dummy = new IOException("dummy");
ex.set(dummy);
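    // Inject an exception into the streamer's lastException holder; the next
    // close() is expected to throw it exactly once and then clear it, so the
    // final close() below must not throw again (HDFS-5335).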
try {
dos.close();
} catch (IOException e) {
Assert.assertEquals(e, dummy);
}
thrown = (Throwable) Whitebox.getInternalState(ex, "thrown");
Assert.assertNull(thrown);
dos.close();
}
/**
* The computePacketChunkSize() method of DFSOutputStream should set the actual
* packet size < 64kB. See HDFS-7308 for details.
*/
@Test
public void testComputePacketChunkSize()
throws Exception {
DistributedFileSystem fs = cluster.getFileSystem();
FSDataOutputStream os = fs.create(new Path("/test"));
DFSOutputStream dos = (DFSOutputStream) Whitebox.getInternalState(os,
"wrappedStream");
final int packetSize = 64*1024;
final int bytesPerChecksum = 512;
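    // computePacketChunkSize() must leave room for the packet header inside
    // the requested 64KB packet size; the reflection below invokes the private
    // method and then inspects the resulting packetSize field directly.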
Method method = dos.getClass().getDeclaredMethod("computePacketChunkSize",
int.class, int.class);
method.setAccessible(true);
method.invoke(dos, packetSize, bytesPerChecksum);
Field field = dos.getClass().getDeclaredField("packetSize");
field.setAccessible(true);
Assert.assertTrue((Integer) field.get(dos) + 33 < packetSize);
// If PKT_MAX_HEADER_LEN is 257, actual packet size come to over 64KB
// without a fix on HDFS-7308.
Assert.assertTrue((Integer) field.get(dos) + 257 < packetSize);
}
@Test
public void testCongestionBackoff() throws IOException {
DfsClientConf dfsClientConf = mock(DfsClientConf.class);
DFSClient client = mock(DFSClient.class);
when(client.getConf()).thenReturn(dfsClientConf);
client.clientRunning = true;
DataStreamer stream = new DataStreamer(
mock(HdfsFileStatus.class),
mock(ExtendedBlock.class),
client,
"foo", null, null, null, null, null);
DataOutputStream blockStream = mock(DataOutputStream.class);
doThrow(new IOException()).when(blockStream).flush();
Whitebox.setInternalState(stream, "blockStream", blockStream);
Whitebox.setInternalState(stream, "stage",
BlockConstructionStage.PIPELINE_CLOSE);
@SuppressWarnings("unchecked")
LinkedList<DFSPacket> dataQueue = (LinkedList<DFSPacket>)
Whitebox.getInternalState(stream, "dataQueue");
@SuppressWarnings("unchecked")
ArrayList<DatanodeInfo> congestedNodes = (ArrayList<DatanodeInfo>)
Whitebox.getInternalState(stream, "congestedNodes");
congestedNodes.add(mock(DatanodeInfo.class));
DFSPacket packet = mock(DFSPacket.class);
when(packet.getTraceParents()).thenReturn(new long[] {});
dataQueue.add(packet);
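    // Running the streamer should hit the injected flush() failure; its error
    // handling is expected to clear the congested-node list, which is what the
    // assertion below verifies.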
stream.run();
Assert.assertTrue(congestedNodes.isEmpty());
}
@AfterClass
public static void tearDown() {
cluster.shutdown();
}
}
| 5,624 | 35.764706 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgradeDowngrade.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.junit.Assert;
import org.junit.Test;
public class TestRollingUpgradeDowngrade {
@Test(timeout = 300000)
public void testDowngrade() throws Exception {
final Configuration conf = new HdfsConfiguration();
MiniQJMHACluster cluster = null;
final Path foo = new Path("/foo");
final Path bar = new Path("/bar");
try {
cluster = new MiniQJMHACluster.Builder(conf).build();
MiniDFSCluster dfsCluster = cluster.getDfsCluster();
dfsCluster.waitActive();
// let NN1 tail editlog every 1s
dfsCluster.getConfiguration(1).setInt(
DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
dfsCluster.restartNameNode(1);
dfsCluster.transitionToActive(0);
DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
dfs.mkdirs(foo);
// start rolling upgrade
RollingUpgradeInfo info = dfs
.rollingUpgrade(RollingUpgradeAction.PREPARE);
Assert.assertTrue(info.isStarted());
dfs.mkdirs(bar);
TestRollingUpgrade.queryForPreparation(dfs);
dfs.close();
dfsCluster.restartNameNode(0, true, "-rollingUpgrade", "downgrade");
// Once downgraded, there should be no more fsimage for rollbacks.
Assert.assertFalse(dfsCluster.getNamesystem(0).getFSImage()
.hasRollbackFSImage());
// shutdown NN1
dfsCluster.shutdownNameNode(1);
dfsCluster.transitionToActive(0);
dfs = dfsCluster.getFileSystem(0);
Assert.assertTrue(dfs.exists(foo));
Assert.assertTrue(dfs.exists(bar));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
   * Ensure that during downgrade the NN fails to load an fsimage with a newer
   * format.
*/
@Test(expected = IncorrectVersionException.class)
public void testRejectNewFsImage() throws IOException {
final Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
NNStorage storage = spy(cluster.getNameNode().getFSImage().getStorage());
int futureVersion = NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1;
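      // NameNode layout versions are negative and decrease as features are
      // added, so CURRENT_LAYOUT_VERSION - 1 simulates an fsimage written by a
      // newer software version, which a downgrade must reject.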
doReturn(futureVersion).when(storage).getServiceLayoutVersion();
storage.writeAll();
cluster.restartNameNode(0, true, "-rollingUpgrade", "downgrade");
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 4,142 | 35.342105 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.PathUtils;
import org.junit.Test;
/**
* This class tests if FSInputChecker works correctly.
*/
public class TestFSInputChecker {
static final long seed = 0xDEADBEEFL;
static final int BYTES_PER_SUM = 10;
static final int BLOCK_SIZE = 2*BYTES_PER_SUM;
static final int HALF_CHUNK_SIZE = BYTES_PER_SUM/2;
static final int FILE_SIZE = 2*BLOCK_SIZE-1;
static final short NUM_OF_DATANODES = 2;
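  // With these values each block holds exactly two checksum chunks and the
  // file is one byte short of two full blocks, so the reads, seeks and skips
  // below cross both chunk and block boundaries.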
final byte[] expected = new byte[FILE_SIZE];
byte[] actual;
FSDataInputStream stm;
final Random rand = new Random(seed);
/* create a file */
private void writeFile(FileSystem fileSys, Path name) throws IOException {
// create and write a file that contains three blocks of data
FSDataOutputStream stm = fileSys.create(name, new FsPermission((short)0777),
true, fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
NUM_OF_DATANODES, BLOCK_SIZE, null);
stm.write(expected);
stm.close();
}
/*validate data*/
private void checkAndEraseData(byte[] actual, int from, byte[] expected,
String message) throws Exception {
for (int idx = 0; idx < actual.length; idx++) {
assertEquals(message+" byte "+(from+idx)+" differs. expected "+
expected[from+idx]+" actual "+actual[idx],
actual[idx], expected[from+idx]);
actual[idx] = 0;
}
}
/* test read and getPos */
private void checkReadAndGetPos() throws Exception {
actual = new byte[FILE_SIZE];
// test reads that do not cross checksum boundary
stm.seek(0);
int offset;
for(offset=0; offset<BLOCK_SIZE+BYTES_PER_SUM;
offset += BYTES_PER_SUM ) {
assertEquals(stm.getPos(), offset);
stm.readFully(actual, offset, BYTES_PER_SUM);
}
stm.readFully(actual, offset, FILE_SIZE-BLOCK_SIZE-BYTES_PER_SUM);
assertEquals(stm.getPos(), FILE_SIZE);
checkAndEraseData(actual, 0, expected, "Read Sanity Test");
// test reads that cross checksum boundary
stm.seek(0L);
assertEquals(stm.getPos(), 0L);
stm.readFully(actual, 0, HALF_CHUNK_SIZE);
assertEquals(stm.getPos(), HALF_CHUNK_SIZE);
stm.readFully(actual, HALF_CHUNK_SIZE, BLOCK_SIZE-HALF_CHUNK_SIZE);
assertEquals(stm.getPos(), BLOCK_SIZE);
stm.readFully(actual, BLOCK_SIZE, BYTES_PER_SUM+HALF_CHUNK_SIZE);
assertEquals(stm.getPos(), BLOCK_SIZE+BYTES_PER_SUM+HALF_CHUNK_SIZE);
stm.readFully(actual, 2*BLOCK_SIZE-HALF_CHUNK_SIZE,
FILE_SIZE-(2*BLOCK_SIZE-HALF_CHUNK_SIZE));
assertEquals(stm.getPos(), FILE_SIZE);
checkAndEraseData(actual, 0, expected, "Read Sanity Test");
// test read that cross block boundary
stm.seek(0L);
stm.readFully(actual, 0, BYTES_PER_SUM+HALF_CHUNK_SIZE);
assertEquals(stm.getPos(), BYTES_PER_SUM+HALF_CHUNK_SIZE);
stm.readFully(actual, BYTES_PER_SUM+HALF_CHUNK_SIZE, BYTES_PER_SUM);
assertEquals(stm.getPos(), BLOCK_SIZE+HALF_CHUNK_SIZE);
stm.readFully(actual, BLOCK_SIZE+HALF_CHUNK_SIZE,
FILE_SIZE-BLOCK_SIZE-HALF_CHUNK_SIZE);
assertEquals(stm.getPos(), FILE_SIZE);
checkAndEraseData(actual, 0, expected, "Read Sanity Test");
}
/* test if one seek is correct */
private void testSeek1(int offset)
throws Exception {
stm.seek(offset);
assertEquals(offset, stm.getPos());
stm.readFully(actual);
checkAndEraseData(actual, offset, expected, "Read Sanity Test");
}
/* test seek() */
private void checkSeek( ) throws Exception {
actual = new byte[HALF_CHUNK_SIZE];
// test seeks to checksum boundary
testSeek1(0);
testSeek1(BYTES_PER_SUM);
testSeek1(BLOCK_SIZE);
// test seek to non-checksum-boundary pos
testSeek1(BLOCK_SIZE+HALF_CHUNK_SIZE);
testSeek1(HALF_CHUNK_SIZE);
// test seek to a position at the same checksum chunk
testSeek1(HALF_CHUNK_SIZE/2);
testSeek1(HALF_CHUNK_SIZE*3/2);
// test end of file
actual = new byte[1];
testSeek1(FILE_SIZE-1);
String errMsg = null;
try {
stm.seek(FILE_SIZE);
} catch (IOException e) {
errMsg = e.getMessage();
}
    assertTrue("seeking to the end of the file should not fail", errMsg == null);
}
/* test if one skip is correct */
private void testSkip1(int skippedBytes)
throws Exception {
long oldPos = stm.getPos();
IOUtils.skipFully(stm, skippedBytes);
long newPos = oldPos + skippedBytes;
assertEquals(stm.getPos(), newPos);
stm.readFully(actual);
checkAndEraseData(actual, (int)newPos, expected, "Read Sanity Test");
}
/* test skip() */
private void checkSkip( ) throws Exception {
actual = new byte[HALF_CHUNK_SIZE];
// test skip to a checksum boundary
stm.seek(0);
testSkip1(BYTES_PER_SUM);
testSkip1(HALF_CHUNK_SIZE);
testSkip1(HALF_CHUNK_SIZE);
// test skip to non-checksum-boundary pos
stm.seek(0);
testSkip1(HALF_CHUNK_SIZE + 1);
testSkip1(BYTES_PER_SUM);
testSkip1(HALF_CHUNK_SIZE);
// test skip to a position at the same checksum chunk
stm.seek(0);
testSkip1(1);
testSkip1(1);
// test skip to end of file
stm.seek(0);
actual = new byte[1];
testSkip1(FILE_SIZE-1);
stm.seek(0);
IOUtils.skipFully(stm, FILE_SIZE);
try {
IOUtils.skipFully(stm, 10);
fail("expected to get a PrematureEOFException");
} catch (EOFException e) {
assertEquals(e.getMessage(), "Premature EOF from inputStream " +
"after skipping 0 byte(s).");
}
stm.seek(0);
try {
IOUtils.skipFully(stm, FILE_SIZE + 10);
fail("expected to get a PrematureEOFException");
} catch (EOFException e) {
assertEquals(e.getMessage(), "Premature EOF from inputStream " +
"after skipping " + FILE_SIZE + " byte(s).");
}
stm.seek(10);
try {
IOUtils.skipFully(stm, FILE_SIZE);
fail("expected to get a PrematureEOFException");
} catch (EOFException e) {
assertEquals(e.getMessage(), "Premature EOF from inputStream " +
"after skipping " + (FILE_SIZE - 10) + " byte(s).");
}
}
private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
assertTrue(fileSys.exists(name));
fileSys.delete(name, true);
assertTrue(!fileSys.exists(name));
}
/**
   * Tests read/seek/getPos/skip operations for the input stream.
*/
private void testChecker(FileSystem fileSys, boolean readCS)
throws Exception {
Path file = new Path("try.dat");
writeFile(fileSys, file);
try {
if (!readCS) {
fileSys.setVerifyChecksum(false);
}
stm = fileSys.open(file);
checkReadAndGetPos();
checkSeek();
checkSkip();
//checkMark
assertFalse(stm.markSupported());
stm.close();
} finally {
if (!readCS) {
fileSys.setVerifyChecksum(true);
}
cleanupFile(fileSys, file);
}
}
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
// create a file and verify that checksum corruption results in
// a checksum exception on LocalFS
String dir = PathUtils.getTestDirName(getClass());
Path file = new Path(dir + "/corruption-test.dat");
Path crcFile = new Path(dir + "/.corruption-test.dat.crc");
writeFile(fileSys, file);
int fileLen = (int)fileSys.getFileStatus(file).getLen();
byte [] buf = new byte[fileLen];
InputStream in = fileSys.open(file);
IOUtils.readFully(in, buf, 0, buf.length);
in.close();
// check .crc corruption
checkFileCorruption(fileSys, file, crcFile);
fileSys.delete(file, true);
writeFile(fileSys, file);
    // check data corruption
checkFileCorruption(fileSys, file, file);
fileSys.delete(file, true);
}
private void checkFileCorruption(LocalFileSystem fileSys, Path file,
Path fileToCorrupt) throws IOException {
// corrupt the file
RandomAccessFile out =
new RandomAccessFile(new File(fileToCorrupt.toString()), "rw");
byte[] buf = new byte[(int)fileSys.getFileStatus(file).getLen()];
int corruptFileLen = (int)fileSys.getFileStatus(fileToCorrupt).getLen();
assertTrue(buf.length >= corruptFileLen);
rand.nextBytes(buf);
out.seek(corruptFileLen/2);
out.write(buf, 0, corruptFileLen/4);
out.close();
boolean gotException = false;
InputStream in = fileSys.open(file);
try {
IOUtils.readFully(in, buf, 0, buf.length);
} catch (ChecksumException e) {
gotException = true;
}
assertTrue(gotException);
in.close();
}
@Test
public void testFSInputChecker() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_SUM);
rand.nextBytes(expected);
// test DFS
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fileSys = cluster.getFileSystem();
try {
testChecker(fileSys, true);
testChecker(fileSys, false);
testSeekAndRead(fileSys);
} finally {
fileSys.close();
cluster.shutdown();
}
// test Local FS
fileSys = FileSystem.getLocal(conf);
try {
testChecker(fileSys, true);
testChecker(fileSys, false);
testFileCorruption((LocalFileSystem)fileSys);
testSeekAndRead(fileSys);
}finally {
fileSys.close();
}
}
private void testSeekAndRead(FileSystem fileSys)
throws IOException {
Path file = new Path("try.dat");
writeFile(fileSys, file);
stm = fileSys.open(
file,
fileSys.getConf().getInt(
CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096));
checkSeekAndRead();
stm.close();
cleanupFile(fileSys, file);
}
private void checkSeekAndRead() throws IOException {
int position = 1;
int len = 2 * BYTES_PER_SUM - position;
readAndCompare(stm, position, len);
position = BYTES_PER_SUM;
len = BYTES_PER_SUM;
readAndCompare(stm, position, len);
}
private void readAndCompare(FSDataInputStream in, int position, int len)
throws IOException {
byte[] b = new byte[len];
in.seek(position);
IOUtils.readFully(in, b, 0, b.length);
for (int i = 0; i < b.length; i++) {
assertEquals(expected[position + i], b[i]);
}
}
}
| 12,128 | 30.585938 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.junit.Assert;
import org.junit.Test;
/**
* This class tests the client connection caching in a single node
* mini-cluster.
*/
public class TestConnCache {
static final Log LOG = LogFactory.getLog(TestConnCache.class);
static final int BLOCK_SIZE = 4096;
static final int FILE_SIZE = 3 * BLOCK_SIZE;
/**
* (Optionally) seek to position, read and verify data.
*
* Seek to specified position if pos is non-negative.
*/
private void pread(DFSInputStream in,
long pos,
byte[] buffer,
int offset,
int length,
byte[] authenticData)
throws IOException {
Assert.assertTrue("Test buffer too small", buffer.length >= offset + length);
if (pos >= 0)
in.seek(pos);
LOG.info("Reading from file of size " + in.getFileLength() +
" at offset " + in.getPos());
    // Remember where the read starts and how many bytes were requested, since
    // the loop below consumes 'length' and 'pos' may be negative (no seek).
    final long startPos = in.getPos();
    final int startOffset = offset;
    final int bytesRequested = length;
    while (length > 0) {
      int cnt = in.read(buffer, offset, length);
      Assert.assertTrue("Error in read", cnt > 0);
      offset += cnt;
      length -= cnt;
    }
    // Verify every byte that was read against the authentic data
    for (int i = 0; i < bytesRequested; ++i) {
      byte actual = buffer[startOffset + i];
      byte expect = authenticData[(int) (startPos + i)];
      assertEquals("Read data mismatch at file offset " + (startPos + i) +
          ". Expects " + expect + "; got " + actual,
          expect, actual);
    }
}
/**
* Read a file served entirely from one DN. Seek around and read from
* different offsets. And verify that they all use the same socket.
* @throws Exception
*/
@Test
public void testReadFromOneDN() throws Exception {
HdfsConfiguration configuration = new HdfsConfiguration();
// One of the goals of this test is to verify that we don't open more
// than one socket. So use a different client context, so that we
// get our own socket cache, rather than sharing with the other test
// instances. Also use a really long socket timeout so that nothing
// gets closed before we get around to checking the cache size at the end.
final String contextName = "testReadFromOneDNContext";
configuration.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, contextName);
configuration.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
100000000L);
BlockReaderTestUtil util = new BlockReaderTestUtil(1, configuration);
final Path testFile = new Path("/testConnCache.dat");
byte authenticData[] = util.writeFile(testFile, FILE_SIZE / 1024);
DFSClient client = new DFSClient(
new InetSocketAddress("localhost",
util.getCluster().getNameNodePort()), util.getConf());
ClientContext cacheContext =
ClientContext.get(contextName, client.getConf());
DFSInputStream in = client.open(testFile.toString());
LOG.info("opened " + testFile.toString());
byte[] dataBuf = new byte[BLOCK_SIZE];
// Initial read
pread(in, 0, dataBuf, 0, dataBuf.length, authenticData);
// Read again and verify that the socket is the same
pread(in, FILE_SIZE - dataBuf.length, dataBuf, 0, dataBuf.length,
authenticData);
pread(in, 1024, dataBuf, 0, dataBuf.length, authenticData);
// No seek; just read
pread(in, -1, dataBuf, 0, dataBuf.length, authenticData);
pread(in, 64, dataBuf, 0, dataBuf.length / 2, authenticData);
in.close();
client.close();
Assert.assertEquals(1,
ClientContext.getFromConf(configuration).getPeerCache().size());
}
}
| 4,560 | 36.081301 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
* Test abandoning blocks, which clients do on pipeline creation failure.
*/
public class TestAbandonBlock {
public static final Log LOG = LogFactory.getLog(TestAbandonBlock.class);
private static final Configuration CONF = new HdfsConfiguration();
static final String FILE_NAME_PREFIX
= "/" + TestAbandonBlock.class.getSimpleName() + "_";
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
@Before
public void setUp() throws Exception {
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
fs = cluster.getFileSystem();
cluster.waitActive();
}
@After
public void tearDown() throws Exception {
fs.close();
cluster.shutdown();
}
@Test
/** Abandon a block while creating a file */
public void testAbandonBlock() throws IOException {
String src = FILE_NAME_PREFIX + "foo";
// Start writing a file but do not close it
FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short)1, 512L);
for (int i = 0; i < 1024; i++) {
fout.write(123);
}
fout.hflush();
long fileId = ((DFSOutputStream)fout.getWrappedStream()).getFileId();
// Now abandon the last block
DFSClient dfsclient = DFSClientAdapter.getDFSClient(fs);
LocatedBlocks blocks =
dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
    int originalNumBlocks = blocks.locatedBlockCount();
LocatedBlock b = blocks.getLastLocatedBlock();
dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
dfsclient.clientName);
// call abandonBlock again to make sure the operation is idempotent
dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
dfsclient.clientName);
// And close the file
fout.close();
// Close cluster and check the block has been abandoned after restart
cluster.restartNameNode();
blocks = dfsclient.getNamenode().getBlockLocations(src, 0,
Integer.MAX_VALUE);
Assert.assertEquals("Blocks " + b + " has not been abandoned.",
orginalNumBlocks, blocks.locatedBlockCount() + 1);
}
@Test
/** Make sure that the quota is decremented correctly when a block is abandoned */
public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
// Setting diskspace quota to 3MB
fs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);
// Start writing a file with 2 replicas to ensure each datanode has one.
// Block Size is 1MB.
String src = FILE_NAME_PREFIX + "test_quota1";
FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short)2, 1024 * 1024);
for (int i = 0; i < 1024; i++) {
fout.writeByte(123);
}
// Shutdown one datanode, causing the block abandonment.
cluster.getDataNodes().get(0).shutdown();
// Close the file, new block will be allocated with 2MB pending size.
try {
fout.close();
} catch (QuotaExceededException e) {
fail("Unexpected quota exception when closing fout");
}
}
}
| 4,481 | 34.856 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/** Unit tests for permission */
public class TestDFSPermission {
public static final Log LOG = LogFactory.getLog(TestDFSPermission.class);
final private static Configuration conf = new HdfsConfiguration();
final private static String GROUP1_NAME = "group1";
final private static String GROUP2_NAME = "group2";
final private static String GROUP3_NAME = "group3";
final private static String GROUP4_NAME = "group4";
final private static String USER1_NAME = "user1";
final private static String USER2_NAME = "user2";
final private static String USER3_NAME = "user3";
private static final UserGroupInformation SUPERUSER;
private static final UserGroupInformation USER1;
private static final UserGroupInformation USER2;
private static final UserGroupInformation USER3;
final private static short MAX_PERMISSION = 511;
final private static short DEFAULT_UMASK = 022;
final private static FsPermission DEFAULT_PERMISSION =
FsPermission.createImmutable((short) 0777);
final static private int NUM_TEST_PERMISSIONS =
conf.getInt("test.dfs.permission.num", 10) * (MAX_PERMISSION + 1) / 100;
final private static String PATH_NAME = "xx";
final private static Path FILE_DIR_PATH = new Path("/", PATH_NAME);
final private static Path NON_EXISTENT_PATH = new Path("/parent", PATH_NAME);
final private static Path NON_EXISTENT_FILE = new Path("/NonExistentFile");
private FileSystem fs;
private MiniDFSCluster cluster;
private static final Random r;
static {
try {
// Initiate the random number generator and logging the seed
long seed = Time.now();
r = new Random(seed);
LOG.info("Random number generator uses seed " + seed);
LOG.info("NUM_TEST_PERMISSIONS=" + NUM_TEST_PERMISSIONS);
// explicitly turn on permission checking
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
// create fake mapping for the groups
Map<String, String[]> u2g_map = new HashMap<String, String[]> (3);
u2g_map.put(USER1_NAME, new String[] {GROUP1_NAME, GROUP2_NAME });
u2g_map.put(USER2_NAME, new String[] {GROUP2_NAME, GROUP3_NAME });
u2g_map.put(USER3_NAME, new String[] {GROUP3_NAME, GROUP4_NAME });
DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
// Initiate all four users
SUPERUSER = UserGroupInformation.getCurrentUser();
USER1 = UserGroupInformation.createUserForTesting(USER1_NAME,
new String[] { GROUP1_NAME, GROUP2_NAME });
USER2 = UserGroupInformation.createUserForTesting(USER2_NAME,
new String[] { GROUP2_NAME, GROUP3_NAME });
USER3 = UserGroupInformation.createUserForTesting(USER3_NAME,
new String[] { GROUP3_NAME, GROUP4_NAME });
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Before
public void setUp() throws IOException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
}
@After
public void tearDown() throws IOException {
if (cluster != null) {
cluster.shutdown();
}
}
/** This tests if permission setting in create, mkdir, and
* setPermission works correctly
*/
@Test
public void testPermissionSetting() throws Exception {
testPermissionSetting(OpType.CREATE); // test file creation
testPermissionSetting(OpType.MKDIRS); // test directory creation
}
private void initFileSystem(short umask) throws Exception {
// set umask in configuration, converting to padded octal
conf.set(FsPermission.UMASK_LABEL, String.format("%1$03o", umask));
fs = FileSystem.get(conf);
}
private void closeFileSystem() throws Exception {
fs.close();
}
/* check permission setting works correctly for file or directory */
private void testPermissionSetting(OpType op) throws Exception {
short uMask = DEFAULT_UMASK;
// case 1: use default permission but all possible umasks
PermissionGenerator generator = new PermissionGenerator(r);
FsPermission permission = new FsPermission(DEFAULT_PERMISSION);
for (short i = 0; i < NUM_TEST_PERMISSIONS; i++) {
uMask = generator.next();
initFileSystem(uMask);
createAndCheckPermission(op, FILE_DIR_PATH, uMask, permission, true);
closeFileSystem();
}
// case 2: use permission 0643 and the default umask
uMask = DEFAULT_UMASK;
initFileSystem(uMask);
createAndCheckPermission(op, FILE_DIR_PATH, uMask, new FsPermission(
(short) 0643), true);
closeFileSystem();
// case 3: use permission 0643 and umask 0222
uMask = (short) 0222;
initFileSystem(uMask);
createAndCheckPermission(op, FILE_DIR_PATH, uMask, new FsPermission(
(short) 0643), false);
closeFileSystem();
// case 4: set permission
uMask = (short) 0111;
initFileSystem(uMask);
fs.setPermission(FILE_DIR_PATH, new FsPermission(uMask));
short expectedPermission = (short) 0111;
checkPermission(FILE_DIR_PATH, expectedPermission, true);
closeFileSystem();
// case 5: test non-existent parent directory
uMask = DEFAULT_UMASK;
initFileSystem(uMask);
assertFalse("File shouldn't exists", fs.exists(NON_EXISTENT_PATH));
createAndCheckPermission(op, NON_EXISTENT_PATH, uMask, new FsPermission(
DEFAULT_PERMISSION), false);
Path parent = NON_EXISTENT_PATH.getParent();
checkPermission(parent, getPermission(parent.getParent()), true);
closeFileSystem();
}
/* get the permission of a file/directory */
private short getPermission(Path path) throws IOException {
return fs.getFileStatus(path).getPermission().toShort();
}
/* create a file/directory with the default umask and permission */
private void create(OpType op, Path name) throws IOException {
create(op, name, DEFAULT_UMASK, new FsPermission(DEFAULT_PERMISSION));
}
/* create a file/directory with the given umask and permission */
private void create(OpType op, Path name, short umask,
FsPermission permission) throws IOException {
// set umask in configuration, converting to padded octal
conf.set(FsPermission.UMASK_LABEL, String.format("%1$03o", umask));
// create the file/directory
switch (op) {
case CREATE:
FSDataOutputStream out = fs.create(name, permission, true,
conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
fs.getDefaultReplication(name), fs.getDefaultBlockSize(name), null);
out.close();
break;
case MKDIRS:
fs.mkdirs(name, permission);
break;
default:
throw new IOException("Unsupported operation: " + op);
}
}
  /* Create a file/directory with the provided umask and permission, then
   * check that the permission is set correctly.
   * If the delete flag is true, delete the file afterwards; otherwise leave
   * it in the file system.
*/
private void createAndCheckPermission(OpType op, Path name, short umask,
FsPermission permission, boolean delete) throws Exception {
// create the file/directory
create(op, name, umask, permission);
// get the short form of the permission
short permissionNum = (DEFAULT_PERMISSION.equals(permission)) ? MAX_PERMISSION
: permission.toShort();
// get the expected permission
    short expectedPermission = (short) (~umask & permissionNum);
// check if permission is correctly set
checkPermission(name, expectedPermission, delete);
}
/* Check if the permission of a file/directory is the same as the
* expected permission; If the delete flag is true, delete the
* file/directory afterwards.
*/
private void checkPermission(Path name, short expectedPermission,
boolean delete) throws IOException {
try {
// check its permission
assertEquals(getPermission(name), expectedPermission);
} finally {
// delete the file
if (delete) {
fs.delete(name, true);
}
}
}
/**
* check that ImmutableFsPermission can be used as the argument
* to setPermission
*/
@Test
public void testImmutableFsPermission() throws IOException {
fs = FileSystem.get(conf);
// set the permission of the root to be world-wide rwx
fs.setPermission(new Path("/"),
FsPermission.createImmutable((short)0777));
}
/* check if the ownership of a file/directory is set correctly */
@Test
public void testOwnership() throws Exception {
testOwnership(OpType.CREATE); // test file creation
testOwnership(OpType.MKDIRS); // test directory creation
}
/* change a file/directory's owner and group.
* if expectDeny is set, expect an AccessControlException.
*/
private void setOwner(Path path, String owner, String group,
boolean expectDeny) throws IOException {
try {
String expectedOwner = (owner == null) ? getOwner(path) : owner;
String expectedGroup = (group == null) ? getGroup(path) : group;
fs.setOwner(path, owner, group);
checkOwnership(path, expectedOwner, expectedGroup);
assertFalse(expectDeny);
} catch(AccessControlException e) {
assertTrue(expectDeny);
}
}
/* check ownership is set correctly for a file or directory */
private void testOwnership(OpType op) throws Exception {
// case 1: superuser create a file/directory
fs = FileSystem.get(conf);
create(op, FILE_DIR_PATH, DEFAULT_UMASK,
new FsPermission(DEFAULT_PERMISSION));
checkOwnership(FILE_DIR_PATH, SUPERUSER.getShortUserName(),
getGroup(FILE_DIR_PATH.getParent()));
// case 2: superuser changes FILE_DIR_PATH's owner to be <user1, group3>
setOwner(FILE_DIR_PATH, USER1.getShortUserName(), GROUP3_NAME, false);
// case 3: user1 changes FILE_DIR_PATH's owner to be user2
login(USER1);
setOwner(FILE_DIR_PATH, USER2.getShortUserName(), null, true);
// case 4: user1 changes FILE_DIR_PATH's group to be group1 which it belongs
// to
setOwner(FILE_DIR_PATH, null, GROUP1_NAME, false);
// case 5: user1 changes FILE_DIR_PATH's group to be group3
// which it does not belong to
setOwner(FILE_DIR_PATH, null, GROUP3_NAME, true);
// case 6: user2 (non-owner) changes FILE_DIR_PATH's group to be group3
login(USER2);
setOwner(FILE_DIR_PATH, null, GROUP3_NAME, true);
// case 7: user2 (non-owner) changes FILE_DIR_PATH's user to be user2
setOwner(FILE_DIR_PATH, USER2.getShortUserName(), null, true);
// delete the file/directory
login(SUPERUSER);
fs.delete(FILE_DIR_PATH, true);
}
/* Return the group owner of the file/directory */
private String getGroup(Path path) throws IOException {
return fs.getFileStatus(path).getGroup();
}
/* Return the file owner of the file/directory */
private String getOwner(Path path) throws IOException {
return fs.getFileStatus(path).getOwner();
}
/* check if ownership is set correctly */
private void checkOwnership(Path name, String expectedOwner,
String expectedGroup) throws IOException {
// check its owner and group
FileStatus status = fs.getFileStatus(name);
assertEquals(status.getOwner(), expectedOwner);
assertEquals(status.getGroup(), expectedGroup);
}
final static private String ANCESTOR_NAME = "/ancestor";
final static private String PARENT_NAME = "parent";
final static private String FILE_NAME = "file";
final static private String DIR_NAME = "dir";
final static private String FILE_DIR_NAME = "filedir";
private enum OpType {CREATE, MKDIRS, OPEN, SET_REPLICATION,
GET_FILEINFO, IS_DIR, EXISTS, GET_CONTENT_LENGTH, LIST, RENAME, DELETE
};
/* Check if namenode performs permission checking correctly for
* superuser, file owner, group owner, and other users */
@Test
public void testPermissionChecking() throws Exception {
try {
fs = FileSystem.get(conf);
// set the permission of the root to be world-wide rwx
fs.setPermission(new Path("/"), new FsPermission((short)0777));
// create a directory hierarchy and sets random permission for each inode
PermissionGenerator ancestorPermissionGenerator =
new PermissionGenerator(r);
PermissionGenerator dirPermissionGenerator = new PermissionGenerator(r);
PermissionGenerator filePermissionGenerator = new PermissionGenerator(r);
short[] ancestorPermissions = new short[NUM_TEST_PERMISSIONS];
short[] parentPermissions = new short[NUM_TEST_PERMISSIONS];
short[] permissions = new short[NUM_TEST_PERMISSIONS];
Path[] ancestorPaths = new Path[NUM_TEST_PERMISSIONS];
Path[] parentPaths = new Path[NUM_TEST_PERMISSIONS];
Path[] filePaths = new Path[NUM_TEST_PERMISSIONS];
Path[] dirPaths = new Path[NUM_TEST_PERMISSIONS];
for (int i = 0; i < NUM_TEST_PERMISSIONS; i++) {
// create ancestor directory
ancestorPaths[i] = new Path(ANCESTOR_NAME + i);
create(OpType.MKDIRS, ancestorPaths[i]);
fs.setOwner(ancestorPaths[i], USER1_NAME, GROUP2_NAME);
// create parent directory
parentPaths[i] = new Path(ancestorPaths[i], PARENT_NAME + i);
create(OpType.MKDIRS, parentPaths[i]);
// change parent directory's ownership to be user1
fs.setOwner(parentPaths[i], USER1_NAME, GROUP2_NAME);
filePaths[i] = new Path(parentPaths[i], FILE_NAME + i);
dirPaths[i] = new Path(parentPaths[i], DIR_NAME + i);
// makes sure that each inode at the same level
// has a different permission
ancestorPermissions[i] = ancestorPermissionGenerator.next();
parentPermissions[i] = dirPermissionGenerator.next();
permissions[i] = filePermissionGenerator.next();
fs.setPermission(ancestorPaths[i], new FsPermission(
ancestorPermissions[i]));
fs.setPermission(parentPaths[i], new FsPermission(
parentPermissions[i]));
}
/* file owner */
testPermissionCheckingPerUser(USER1, ancestorPermissions,
parentPermissions, permissions, parentPaths, filePaths, dirPaths);
/* group owner */
testPermissionCheckingPerUser(USER2, ancestorPermissions,
parentPermissions, permissions, parentPaths, filePaths, dirPaths);
      /* other user */
testPermissionCheckingPerUser(USER3, ancestorPermissions,
parentPermissions, permissions, parentPaths, filePaths, dirPaths);
      /* superuser */
testPermissionCheckingPerUser(SUPERUSER, ancestorPermissions,
parentPermissions, permissions, parentPaths, filePaths, dirPaths);
} finally {
fs.close();
}
}
@Test
public void testAccessOwner() throws IOException, InterruptedException {
FileSystem rootFs = FileSystem.get(conf);
Path p1 = new Path("/p1");
rootFs.mkdirs(p1);
rootFs.setOwner(p1, USER1_NAME, GROUP1_NAME);
fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(conf);
}
});
fs.setPermission(p1, new FsPermission((short) 0444));
fs.access(p1, FsAction.READ);
try {
fs.access(p1, FsAction.WRITE);
fail("The access call should have failed.");
} catch (AccessControlException e) {
assertTrue("Permission denied messages must carry the username",
e.getMessage().contains(USER1_NAME));
assertTrue("Permission denied messages must carry the path parent",
e.getMessage().contains(
p1.getParent().toUri().getPath()));
}
Path badPath = new Path("/bad/bad");
try {
fs.access(badPath, FsAction.READ);
fail("The access call should have failed");
} catch (FileNotFoundException e) {
// expected
}
}
@Test
public void testAccessGroupMember() throws IOException, InterruptedException {
FileSystem rootFs = FileSystem.get(conf);
Path p2 = new Path("/p2");
rootFs.mkdirs(p2);
rootFs.setOwner(p2, UserGroupInformation.getCurrentUser().getShortUserName(), GROUP1_NAME);
rootFs.setPermission(p2, new FsPermission((short) 0740));
fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(conf);
}
});
fs.access(p2, FsAction.READ);
try {
fs.access(p2, FsAction.EXECUTE);
fail("The access call should have failed.");
} catch (AccessControlException e) {
assertTrue("Permission denied messages must carry the username",
e.getMessage().contains(USER1_NAME));
assertTrue("Permission denied messages must carry the path parent",
e.getMessage().contains(
p2.getParent().toUri().getPath()));
}
}
@Test
public void testAccessOthers() throws IOException, InterruptedException {
FileSystem rootFs = FileSystem.get(conf);
Path p3 = new Path("/p3");
rootFs.mkdirs(p3);
rootFs.setPermission(p3, new FsPermission((short) 0774));
fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(conf);
}
});
fs.access(p3, FsAction.READ);
try {
fs.access(p3, FsAction.READ_WRITE);
fail("The access call should have failed.");
} catch (AccessControlException e) {
assertTrue("Permission denied messages must carry the username",
e.getMessage().contains(USER1_NAME));
assertTrue("Permission denied messages must carry the path parent",
e.getMessage().contains(
p3.getParent().toUri().getPath()));
}
}
/* Check if namenode performs permission checking correctly
* for the given user for operations mkdir, open, setReplication,
* getFileInfo, isDirectory, exists, getContentLength, list, rename,
* and delete */
private void testPermissionCheckingPerUser(UserGroupInformation ugi,
short[] ancestorPermission, short[] parentPermission,
short[] filePermission, Path[] parentDirs, Path[] files, Path[] dirs)
throws Exception {
boolean[] isDirEmpty = new boolean[NUM_TEST_PERMISSIONS];
login(SUPERUSER);
for (int i = 0; i < NUM_TEST_PERMISSIONS; i++) {
create(OpType.CREATE, files[i]);
create(OpType.MKDIRS, dirs[i]);
fs.setOwner(files[i], USER1_NAME, GROUP2_NAME);
fs.setOwner(dirs[i], USER1_NAME, GROUP2_NAME);
checkOwnership(dirs[i], USER1_NAME, GROUP2_NAME);
checkOwnership(files[i], USER1_NAME, GROUP2_NAME);
FsPermission fsPermission = new FsPermission(filePermission[i]);
fs.setPermission(files[i], fsPermission);
fs.setPermission(dirs[i], fsPermission);
isDirEmpty[i] = (fs.listStatus(dirs[i]).length == 0);
}
login(ugi);
for (int i = 0; i < NUM_TEST_PERMISSIONS; i++) {
testCreateMkdirs(ugi, new Path(parentDirs[i], FILE_DIR_NAME),
ancestorPermission[i], parentPermission[i]);
testOpen(ugi, files[i], ancestorPermission[i], parentPermission[i],
filePermission[i]);
testSetReplication(ugi, files[i], ancestorPermission[i],
parentPermission[i], filePermission[i]);
testSetTimes(ugi, files[i], ancestorPermission[i],
parentPermission[i], filePermission[i]);
testStats(ugi, files[i], ancestorPermission[i], parentPermission[i]);
testList(ugi, files[i], dirs[i], ancestorPermission[i],
parentPermission[i], filePermission[i]);
int next = i == NUM_TEST_PERMISSIONS - 1 ? 0 : i + 1;
testRename(ugi, files[i], files[next], ancestorPermission[i],
parentPermission[i], ancestorPermission[next], parentPermission[next]);
testDeleteFile(ugi, files[i], ancestorPermission[i], parentPermission[i]);
testDeleteDir(ugi, dirs[i], ancestorPermission[i], parentPermission[i],
filePermission[i], null, isDirEmpty[i]);
}
// test non existent file
checkNonExistentFile();
}
/* A random permission generator that guarantees that each permission
* value is generated only once.
*/
static private class PermissionGenerator {
private final Random r;
private final short[] permissions = new short[MAX_PERMISSION + 1];
private int numLeft = MAX_PERMISSION + 1;
PermissionGenerator(Random r) {
this.r = r;
for (int i = 0; i <= MAX_PERMISSION; i++) {
permissions[i] = (short) i;
}
}
short next() throws IOException {
if (numLeft == 0) {
throw new IOException("No more permission is avaialbe");
}
int index = r.nextInt(numLeft); // choose which permission to return
numLeft--; // decrement the counter
// swap the chosen permission with last available permission in the array
short temp = permissions[numLeft];
permissions[numLeft] = permissions[index];
permissions[index] = temp;
return permissions[numLeft];
}
}
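  /* Illustrative sketch only: not part of the original test and never
   * invoked; the method name is made up for exposition. It shows how the
   * swap-to-the-end selection above yields every value in
   * [0, MAX_PERMISSION] exactly once before running out. */
  private static void demoPermissionGeneratorCoversAllValuesOnce()
      throws IOException {
    PermissionGenerator gen = new PermissionGenerator(new Random(0));
    boolean[] seen = new boolean[MAX_PERMISSION + 1];
    for (int i = 0; i <= MAX_PERMISSION; i++) {
      short p = gen.next();  // each call returns a not-yet-returned value
      assertFalse(seen[p]);
      seen[p] = true;
    }
  }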
/* A base class that verifies the permission checking is correct
* for an operation */
abstract class PermissionVerifier {
protected Path path;
protected short ancestorPermission;
protected short parentPermission;
private short permission;
protected short requiredAncestorPermission;
protected short requiredParentPermission;
protected short requiredPermission;
final static protected short opAncestorPermission = SEARCH_MASK;
protected short opParentPermission;
protected short opPermission;
protected UserGroupInformation ugi;
/* initialize */
protected void set(Path path, short ancestorPermission,
short parentPermission, short permission) {
this.path = path;
this.ancestorPermission = ancestorPermission;
this.parentPermission = parentPermission;
this.permission = permission;
setOpPermission();
this.ugi = null;
}
/* Perform an operation and verify if the permission checking is correct */
void verifyPermission(UserGroupInformation ugi) throws IOException {
if (this.ugi != ugi) {
setRequiredPermissions(ugi);
this.ugi = ugi;
}
try {
try {
call();
assertFalse(expectPermissionDeny());
} catch(AccessControlException e) {
assertTrue(expectPermissionDeny());
}
} catch (AssertionError ae) {
logPermissions();
throw ae;
}
}
/** Log the permissions and required permissions */
protected void logPermissions() {
LOG.info("required ancestor permission:"
+ Integer.toOctalString(requiredAncestorPermission));
LOG.info("ancestor permission: "
+ Integer.toOctalString(ancestorPermission));
LOG.info("required parent permission:"
+ Integer.toOctalString(requiredParentPermission));
LOG.info("parent permission: " + Integer.toOctalString(parentPermission));
LOG.info("required permission:"
+ Integer.toOctalString(requiredPermission));
LOG.info("permission: " + Integer.toOctalString(permission));
}
/* Return true if an AccessControlException is expected */
protected boolean expectPermissionDeny() {
return (requiredPermission & permission) != requiredPermission
|| (requiredParentPermission & parentPermission) !=
requiredParentPermission
|| (requiredAncestorPermission & ancestorPermission) !=
requiredAncestorPermission;
}
/* Set the permissions required to pass the permission checking */
protected void setRequiredPermissions(UserGroupInformation ugi) {
if (SUPERUSER.equals(ugi)) {
requiredAncestorPermission = SUPER_MASK;
requiredParentPermission = SUPER_MASK;
requiredPermission = SUPER_MASK;
} else if (USER1.equals(ugi)) {
requiredAncestorPermission = (short)(opAncestorPermission & OWNER_MASK);
requiredParentPermission = (short)(opParentPermission & OWNER_MASK);
requiredPermission = (short)(opPermission & OWNER_MASK);
} else if (USER2.equals(ugi)) {
requiredAncestorPermission = (short)(opAncestorPermission & GROUP_MASK);
requiredParentPermission = (short)(opParentPermission & GROUP_MASK);
requiredPermission = (short)(opPermission & GROUP_MASK);
} else if (USER3.equals(ugi)) {
requiredAncestorPermission = (short)(opAncestorPermission & OTHER_MASK);
requiredParentPermission = (short)(opParentPermission & OTHER_MASK);
requiredPermission = (short)(opPermission & OTHER_MASK);
} else {
throw new IllegalArgumentException("Non-supported user: " + ugi);
}
}
/* Set the rwx permissions required for the operation */
abstract void setOpPermission();
/* Perform the operation */
abstract void call() throws IOException;
}
final static private short SUPER_MASK = 0;
final static private short READ_MASK = 0444;
final static private short WRITE_MASK = 0222;
final static private short SEARCH_MASK = 0111;
final static private short NULL_MASK = 0;
final static private short OWNER_MASK = 0700;
final static private short GROUP_MASK = 0070;
final static private short OTHER_MASK = 0007;
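  /* Illustrative helper, added for exposition only and not referenced by the
   * verifiers below: the permission checks in PermissionVerifier reduce to a
   * bitmask containment test. For example, READ_MASK (0444) is satisfied by
   * 0644 but not by 0200. */
  private static boolean maskSatisfied(short required, short actual) {
    return (required & actual) == required;  // every required bit must be set
  }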
/* A class that verifies the permission checking is correct for create/mkdir*/
private class CreatePermissionVerifier extends PermissionVerifier {
private OpType opType;
private boolean cleanup = true;
/* initialize */
protected void set(Path path, OpType opType, short ancestorPermission,
short parentPermission) {
super.set(path, ancestorPermission, parentPermission, NULL_MASK);
setOpType(opType);
}
void setCleanup(boolean cleanup) {
this.cleanup = cleanup;
}
    /* set whether the operation is mkdir or create */
void setOpType(OpType opType) {
this.opType = opType;
}
@Override
void setOpPermission() {
this.opParentPermission = SEARCH_MASK | WRITE_MASK;
}
@Override
void call() throws IOException {
create(opType, path);
if (cleanup) {
fs.delete(path, true);
}
}
}
private final CreatePermissionVerifier createVerifier =
new CreatePermissionVerifier();
/* test if the permission checking of create/mkdir is correct */
private void testCreateMkdirs(UserGroupInformation ugi, Path path,
short ancestorPermission, short parentPermission) throws Exception {
createVerifier.set(path, OpType.MKDIRS, ancestorPermission,
parentPermission);
createVerifier.verifyPermission(ugi);
createVerifier.setOpType(OpType.CREATE);
createVerifier.setCleanup(false);
createVerifier.verifyPermission(ugi);
createVerifier.setCleanup(true);
    createVerifier.verifyPermission(ugi); // test overwriting an existing file
}
/* A class that verifies the permission checking is correct for open */
private class OpenPermissionVerifier extends PermissionVerifier {
@Override
void setOpPermission() {
this.opParentPermission = SEARCH_MASK;
this.opPermission = READ_MASK;
}
@Override
void call() throws IOException {
FSDataInputStream in = fs.open(path);
in.close();
}
}
private final OpenPermissionVerifier openVerifier = new OpenPermissionVerifier();
/* test if the permission checking of open is correct */
private void testOpen(UserGroupInformation ugi, Path path,
short ancestorPermission, short parentPermission, short filePermission)
throws Exception {
openVerifier
.set(path, ancestorPermission, parentPermission, filePermission);
openVerifier.verifyPermission(ugi);
}
/* A class that verifies the permission checking is correct for
* setReplication */
private class SetReplicationPermissionVerifier extends PermissionVerifier {
@Override
void setOpPermission() {
this.opParentPermission = SEARCH_MASK;
this.opPermission = WRITE_MASK;
}
@Override
void call() throws IOException {
fs.setReplication(path, (short) 1);
}
}
private final SetReplicationPermissionVerifier replicatorVerifier =
new SetReplicationPermissionVerifier();
/* test if the permission checking of setReplication is correct */
private void testSetReplication(UserGroupInformation ugi, Path path,
short ancestorPermission, short parentPermission, short filePermission)
throws Exception {
replicatorVerifier.set(path, ancestorPermission, parentPermission,
filePermission);
replicatorVerifier.verifyPermission(ugi);
}
/* A class that verifies the permission checking is correct for
* setTimes */
private class SetTimesPermissionVerifier extends PermissionVerifier {
@Override
void setOpPermission() {
this.opParentPermission = SEARCH_MASK;
this.opPermission = WRITE_MASK;
}
@Override
void call() throws IOException {
fs.setTimes(path, 100, 100);
fs.setTimes(path, -1, 100);
fs.setTimes(path, 100, -1);
}
}
private final SetTimesPermissionVerifier timesVerifier =
new SetTimesPermissionVerifier();
  /* test if the permission checking of setTimes is correct */
private void testSetTimes(UserGroupInformation ugi, Path path,
short ancestorPermission, short parentPermission, short filePermission)
throws Exception {
timesVerifier.set(path, ancestorPermission, parentPermission,
filePermission);
timesVerifier.verifyPermission(ugi);
}
/* A class that verifies the permission checking is correct for isDirectory,
   * exists, getFileInfo, getContentSummary */
private class StatsPermissionVerifier extends PermissionVerifier {
OpType opType;
/* initialize */
void set(Path path, OpType opType, short ancestorPermission,
short parentPermission) {
super.set(path, ancestorPermission, parentPermission, NULL_MASK);
setOpType(opType);
}
    /* set whether the operation is getFileInfo, isDirectory, exists, or getContentSummary */
void setOpType(OpType opType) {
this.opType = opType;
}
@Override
void setOpPermission() {
this.opParentPermission = SEARCH_MASK;
}
@Override
void call() throws IOException {
switch (opType) {
case GET_FILEINFO:
fs.getFileStatus(path);
break;
case IS_DIR:
fs.isDirectory(path);
break;
case EXISTS:
fs.exists(path);
break;
case GET_CONTENT_LENGTH:
fs.getContentSummary(path).getLength();
break;
default:
throw new IllegalArgumentException("Unexpected operation type: "
+ opType);
}
}
}
private final StatsPermissionVerifier statsVerifier = new StatsPermissionVerifier();
  /* test if the permission checking of isDirectory, exists,
* getFileInfo, getContentSummary is correct */
private void testStats(UserGroupInformation ugi, Path path,
short ancestorPermission, short parentPermission) throws Exception {
statsVerifier.set(path, OpType.GET_FILEINFO, ancestorPermission,
parentPermission);
statsVerifier.verifyPermission(ugi);
statsVerifier.setOpType(OpType.IS_DIR);
statsVerifier.verifyPermission(ugi);
statsVerifier.setOpType(OpType.EXISTS);
statsVerifier.verifyPermission(ugi);
statsVerifier.setOpType(OpType.GET_CONTENT_LENGTH);
statsVerifier.verifyPermission(ugi);
}
private enum InodeType {
FILE, DIR
};
/* A class that verifies the permission checking is correct for list */
private class ListPermissionVerifier extends PermissionVerifier {
private InodeType inodeType;
/* initialize */
void set(Path path, InodeType inodeType, short ancestorPermission,
short parentPermission, short permission) {
this.inodeType = inodeType;
super.set(path, ancestorPermission, parentPermission, permission);
}
/* set if the given path is a file/directory */
void setInodeType(Path path, InodeType inodeType) {
this.path = path;
this.inodeType = inodeType;
setOpPermission();
this.ugi = null;
}
@Override
void setOpPermission() {
this.opParentPermission = SEARCH_MASK;
switch (inodeType) {
case FILE:
this.opPermission = 0;
break;
case DIR:
this.opPermission = READ_MASK | SEARCH_MASK;
break;
default:
throw new IllegalArgumentException("Illegal inode type: " + inodeType);
}
}
@Override
void call() throws IOException {
fs.listStatus(path);
}
}
final ListPermissionVerifier listVerifier = new ListPermissionVerifier();
/* test if the permission checking of list is correct */
private void testList(UserGroupInformation ugi, Path file, Path dir,
short ancestorPermission, short parentPermission, short filePermission)
throws Exception {
listVerifier.set(file, InodeType.FILE, ancestorPermission,
parentPermission, filePermission);
listVerifier.verifyPermission(ugi);
listVerifier.setInodeType(dir, InodeType.DIR);
listVerifier.verifyPermission(ugi);
}
/* A class that verifies the permission checking is correct for rename */
private class RenamePermissionVerifier extends PermissionVerifier {
private Path dst;
private short dstAncestorPermission;
private short dstParentPermission;
/* initialize */
void set(Path src, short srcAncestorPermission, short srcParentPermission,
Path dst, short dstAncestorPermission, short dstParentPermission) {
super.set(src, srcAncestorPermission, srcParentPermission, NULL_MASK);
this.dst = dst;
this.dstAncestorPermission = dstAncestorPermission;
this.dstParentPermission = dstParentPermission;
}
@Override
void setOpPermission() {
opParentPermission = SEARCH_MASK | WRITE_MASK;
}
@Override
void call() throws IOException {
fs.rename(path, dst);
}
@Override
protected boolean expectPermissionDeny() {
return super.expectPermissionDeny()
|| (requiredParentPermission & dstParentPermission) !=
requiredParentPermission
|| (requiredAncestorPermission & dstAncestorPermission) !=
requiredAncestorPermission;
}
@Override
protected void logPermissions() {
super.logPermissions();
LOG.info("dst ancestor permission: "
+ Integer.toOctalString(dstAncestorPermission));
LOG.info("dst parent permission: "
+ Integer.toOctalString(dstParentPermission));
}
}
final RenamePermissionVerifier renameVerifier = new RenamePermissionVerifier();
/* test if the permission checking of rename is correct */
private void testRename(UserGroupInformation ugi, Path src, Path dst,
short srcAncestorPermission, short srcParentPermission,
short dstAncestorPermission, short dstParentPermission) throws Exception {
renameVerifier.set(src, srcAncestorPermission, srcParentPermission, dst,
dstAncestorPermission, dstParentPermission);
renameVerifier.verifyPermission(ugi);
}
/* A class that verifies the permission checking is correct for delete */
private class DeletePermissionVerifier extends PermissionVerifier {
void set(Path path, short ancestorPermission, short parentPermission) {
super.set(path, ancestorPermission, parentPermission, NULL_MASK);
}
@Override
void setOpPermission() {
this.opParentPermission = SEARCH_MASK | WRITE_MASK;
}
@Override
void call() throws IOException {
fs.delete(path, true);
}
}
/* A class that verifies the permission checking is correct for
* directory deletion
*/
private class DeleteDirPermissionVerifier extends DeletePermissionVerifier {
private short[] childPermissions;
/* initialize */
void set(Path path, short ancestorPermission, short parentPermission,
short permission, short[] childPermissions) {
set(path, ancestorPermission, parentPermission, permission);
this.childPermissions = childPermissions;
}
@Override
void setOpPermission() {
this.opParentPermission = SEARCH_MASK | WRITE_MASK;
this.opPermission = SEARCH_MASK | WRITE_MASK | READ_MASK;
}
@Override
protected boolean expectPermissionDeny() {
if (super.expectPermissionDeny()) {
return true;
} else {
if (childPermissions != null) {
for (short childPermission : childPermissions) {
if ((requiredPermission & childPermission) != requiredPermission) {
return true;
}
}
}
return false;
}
}
}
/* A class that verifies the permission checking is correct for
* empty-directory deletion
*/
private class DeleteEmptyDirPermissionVerifier extends DeleteDirPermissionVerifier {
@Override
void setOpPermission() {
this.opParentPermission = SEARCH_MASK | WRITE_MASK;
this.opPermission = NULL_MASK;
}
}
final DeletePermissionVerifier fileDeletionVerifier =
new DeletePermissionVerifier();
/* test if the permission checking of file deletion is correct */
private void testDeleteFile(UserGroupInformation ugi, Path file,
short ancestorPermission, short parentPermission) throws Exception {
fileDeletionVerifier.set(file, ancestorPermission, parentPermission);
fileDeletionVerifier.verifyPermission(ugi);
}
final DeleteDirPermissionVerifier dirDeletionVerifier =
new DeleteDirPermissionVerifier();
final DeleteEmptyDirPermissionVerifier emptyDirDeletionVerifier =
new DeleteEmptyDirPermissionVerifier();
/* test if the permission checking of directory deletion is correct */
private void testDeleteDir(UserGroupInformation ugi, Path path,
short ancestorPermission, short parentPermission, short permission,
short[] childPermissions,
final boolean isDirEmpty) throws Exception {
DeleteDirPermissionVerifier ddpv = isDirEmpty?
emptyDirDeletionVerifier : dirDeletionVerifier;
ddpv.set(path, ancestorPermission, parentPermission, permission,
childPermissions);
ddpv.verifyPermission(ugi);
}
/* log into dfs as the given user */
private void login(UserGroupInformation ugi) throws IOException,
InterruptedException {
if (fs != null) {
fs.close();
}
fs = DFSTestUtil.getFileSystemAs(ugi, conf);
}
/* test non-existent file */
private void checkNonExistentFile() {
try {
assertFalse(fs.exists(NON_EXISTENT_FILE));
} catch (IOException e) {
checkNoPermissionDeny(e);
}
try {
fs.open(NON_EXISTENT_FILE);
} catch (IOException e) {
checkNoPermissionDeny(e);
}
try {
fs.setReplication(NON_EXISTENT_FILE, (short)4);
} catch (IOException e) {
checkNoPermissionDeny(e);
}
try {
fs.getFileStatus(NON_EXISTENT_FILE);
} catch (IOException e) {
checkNoPermissionDeny(e);
}
try {
fs.getContentSummary(NON_EXISTENT_FILE).getLength();
} catch (IOException e) {
checkNoPermissionDeny(e);
}
try {
fs.listStatus(NON_EXISTENT_FILE);
} catch (IOException e) {
checkNoPermissionDeny(e);
}
try {
fs.delete(NON_EXISTENT_FILE, true);
} catch (IOException e) {
checkNoPermissionDeny(e);
}
try {
fs.rename(NON_EXISTENT_FILE, new Path(NON_EXISTENT_FILE+".txt"));
} catch (IOException e) {
checkNoPermissionDeny(e);
}
}
private void checkNoPermissionDeny(IOException e) {
assertFalse(e instanceof AccessControlException);
}
}
| 41,727 | 35.253692 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRemoteBlockReader2.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
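/**
 * Exercises the block reader test suite defined in {@link TestBlockReaderBase}
 * with an unmodified HdfsConfiguration, i.e. without any short-circuit read
 * settings, so reads go through the remote block reader.
 */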
public class TestRemoteBlockReader2 extends TestBlockReaderBase {
HdfsConfiguration createConf() {
HdfsConfiguration conf = new HdfsConfiguration();
return conf;
}
}
| 1,018 | 38.192308 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHttpPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.http.HttpConfig.Policy.HTTP_AND_HTTPS;
import static org.apache.hadoop.http.HttpConfig.Policy.HTTP_ONLY;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.junit.Assert;
import org.junit.Test;
public final class TestHttpPolicy {
@Test(expected = HadoopIllegalArgumentException.class)
public void testInvalidPolicyValue() {
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "invalid");
DFSUtil.getHttpPolicy(conf);
}
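  /**
   * Verify that the deprecated DFS_HTTPS_ENABLE_KEY and HADOOP_SSL_ENABLED_KEY
   * settings still map onto an HTTP policy, and that an explicit
   * DFS_HTTP_POLICY_KEY value takes precedence over them.
   */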
@Test
public void testDeprecatedConfiguration() {
Configuration conf = new Configuration(false);
Assert.assertSame(HTTP_ONLY, DFSUtil.getHttpPolicy(conf));
conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true);
Assert.assertSame(HTTP_AND_HTTPS, DFSUtil.getHttpPolicy(conf));
conf = new Configuration(false);
conf.setBoolean(DFSConfigKeys.HADOOP_SSL_ENABLED_KEY, true);
Assert.assertSame(HTTP_AND_HTTPS, DFSUtil.getHttpPolicy(conf));
conf = new Configuration(false);
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HTTP_ONLY.name());
conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true);
Assert.assertSame(HTTP_ONLY, DFSUtil.getHttpPolicy(conf));
}
}
| 2,116 | 37.490909 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitRead.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.hamcrest.CoreMatchers.equalTo;
import java.io.File;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.junit.AfterClass;
import org.junit.Assume;
import org.junit.Before;
import org.junit.BeforeClass;
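/**
 * Runs the parallel read tests inherited from {@link TestParallelReadUtil}
 * over short-circuit (domain socket) local reads with checksum verification
 * enabled; the tests are skipped if domain sockets cannot be loaded.
 */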
public class TestParallelShortCircuitRead extends TestParallelReadUtil {
private static TemporarySocketDirectory sockDir;
@BeforeClass
static public void setupCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
DFSInputStream.tcpReadsDisabledForTesting = true;
sockDir = new TemporarySocketDirectory();
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
new File(sockDir.getDir(), "TestParallelLocalRead.%d.sock").getAbsolutePath());
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
false);
DomainSocket.disableBindPathValidation();
setupCluster(1, conf);
}
@Before
public void before() {
Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
}
@AfterClass
static public void teardownCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
sockDir.close();
TestParallelReadUtil.teardownCluster();
}
}
| 2,311 | 36.290323 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.*;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.ipc.RemoteException;
import org.junit.Test;
/**
* This class tests if getblocks request works correctly.
*/
public class TestGetBlocks {
private static final int blockSize = 8192;
private static final String racks[] = new String[] { "/d1/r1", "/d1/r1",
"/d1/r2", "/d1/r2", "/d1/r2", "/d2/r3", "/d2/r3" };
private static final int numDatanodes = racks.length;
/**
* Stop the heartbeat of a datanode in the MiniDFSCluster
*
* @param cluster
* The MiniDFSCluster
* @param hostName
* The hostName of the datanode to be stopped
* @return The DataNode whose heartbeat has been stopped
*/
private DataNode stopDataNodeHeartbeat(MiniDFSCluster cluster, String hostName) {
for (DataNode dn : cluster.getDataNodes()) {
if (dn.getDatanodeId().getHostName().equals(hostName)) {
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
return dn;
}
}
return null;
}
/**
   * Test if the datanodes returned by
   * {@link ClientProtocol#getBlockLocations(String, long, long)} are correct
   * when stale node checking is enabled. Also test the scenario where (1)
   * stale node checking is enabled, (2) a write is in progress, and (3) a
   * datanode becomes stale, all at the same time.
*
* @throws Exception
*/
@Test
public void testReadSelectNonStaleDatanode() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
    long staleInterval = 30 * 1000 * 60; // 30 minutes
conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
staleInterval);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDatanodes).racks(racks).build();
cluster.waitActive();
InetSocketAddress addr = new InetSocketAddress("localhost",
cluster.getNameNodePort());
DFSClient client = new DFSClient(addr, conf);
List<DatanodeDescriptor> nodeInfoList = cluster.getNameNode()
.getNamesystem().getBlockManager().getDatanodeManager()
.getDatanodeListForReport(DatanodeReportType.LIVE);
assertEquals("Unexpected number of datanodes", numDatanodes,
nodeInfoList.size());
FileSystem fileSys = cluster.getFileSystem();
FSDataOutputStream stm = null;
try {
// do the writing but do not close the FSDataOutputStream
// in order to mimic the ongoing writing
final Path fileName = new Path("/file1");
stm = fileSys.create(
fileName,
true,
fileSys.getConf().getInt(
CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) 3, blockSize);
stm.write(new byte[(blockSize * 3) / 2]);
// We do not close the stream so that
// the writing seems to be still ongoing
stm.hflush();
LocatedBlocks blocks = client.getNamenode().getBlockLocations(
fileName.toString(), 0, blockSize);
DatanodeInfo[] nodes = blocks.get(0).getLocations();
assertEquals(nodes.length, 3);
DataNode staleNode = null;
DatanodeDescriptor staleNodeInfo = null;
// stop the heartbeat of the first node
staleNode = this.stopDataNodeHeartbeat(cluster, nodes[0].getHostName());
assertNotNull(staleNode);
// set the first node as stale
staleNodeInfo = cluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager()
.getDatanode(staleNode.getDatanodeId());
DFSTestUtil.resetLastUpdatesWithOffset(staleNodeInfo,
-(staleInterval + 1));
LocatedBlocks blocksAfterStale = client.getNamenode().getBlockLocations(
fileName.toString(), 0, blockSize);
DatanodeInfo[] nodesAfterStale = blocksAfterStale.get(0).getLocations();
assertEquals(nodesAfterStale.length, 3);
assertEquals(nodesAfterStale[2].getHostName(), nodes[0].getHostName());
// restart the staleNode's heartbeat
DataNodeTestUtils.setHeartbeatsDisabledForTests(staleNode, false);
// reset the first node as non-stale, so as to avoid two stale nodes
DFSTestUtil.resetLastUpdatesWithOffset(staleNodeInfo, 0);
LocatedBlock lastBlock = client.getLocatedBlocks(fileName.toString(), 0,
Long.MAX_VALUE).getLastLocatedBlock();
nodes = lastBlock.getLocations();
assertEquals(nodes.length, 3);
// stop the heartbeat of the first node for the last block
staleNode = this.stopDataNodeHeartbeat(cluster, nodes[0].getHostName());
assertNotNull(staleNode);
// set the node as stale
DatanodeDescriptor dnDesc = cluster.getNameNode().getNamesystem()
.getBlockManager().getDatanodeManager()
.getDatanode(staleNode.getDatanodeId());
DFSTestUtil.resetLastUpdatesWithOffset(dnDesc, -(staleInterval + 1));
LocatedBlock lastBlockAfterStale = client.getLocatedBlocks(
fileName.toString(), 0, Long.MAX_VALUE).getLastLocatedBlock();
nodesAfterStale = lastBlockAfterStale.getLocations();
assertEquals(nodesAfterStale.length, 3);
assertEquals(nodesAfterStale[2].getHostName(), nodes[0].getHostName());
} finally {
if (stm != null) {
stm.close();
}
client.close();
cluster.shutdown();
}
}
/** test getBlocks */
@Test
public void testGetBlocks() throws Exception {
final Configuration CONF = new HdfsConfiguration();
final short REPLICATION_FACTOR = (short) 2;
final int DEFAULT_BLOCK_SIZE = 1024;
final Random r = new Random();
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
REPLICATION_FACTOR).build();
try {
cluster.waitActive();
// create a file with two blocks
FileSystem fs = cluster.getFileSystem();
FSDataOutputStream out = fs.create(new Path("/tmp.txt"),
REPLICATION_FACTOR);
byte[] data = new byte[1024];
long fileLen = 2 * DEFAULT_BLOCK_SIZE;
long bytesToWrite = fileLen;
while (bytesToWrite > 0) {
r.nextBytes(data);
int bytesToWriteNext = (1024 < bytesToWrite) ? 1024
: (int) bytesToWrite;
out.write(data, 0, bytesToWriteNext);
bytesToWrite -= bytesToWriteNext;
}
out.close();
// get blocks & data nodes
List<LocatedBlock> locatedBlocks;
DatanodeInfo[] dataNodes = null;
boolean notWritten;
do {
final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF),
CONF);
locatedBlocks = dfsclient.getNamenode()
.getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
assertEquals(2, locatedBlocks.size());
notWritten = false;
for (int i = 0; i < 2; i++) {
dataNodes = locatedBlocks.get(i).getLocations();
if (dataNodes.length != REPLICATION_FACTOR) {
notWritten = true;
try {
Thread.sleep(10);
} catch (InterruptedException e) {
}
break;
}
}
} while (notWritten);
// get RPC client to namenode
InetSocketAddress addr = new InetSocketAddress("localhost",
cluster.getNameNodePort());
NamenodeProtocol namenode = NameNodeProxies.createProxy(CONF,
NameNode.getUri(addr), NamenodeProtocol.class).getProxy();
// get blocks of size fileLen from dataNodes[0]
BlockWithLocations[] locs;
locs = namenode.getBlocks(dataNodes[0], fileLen).getBlocks();
assertEquals(locs.length, 2);
assertEquals(locs[0].getStorageIDs().length, 2);
assertEquals(locs[1].getStorageIDs().length, 2);
// get blocks of size BlockSize from dataNodes[0]
locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE).getBlocks();
assertEquals(locs.length, 1);
assertEquals(locs[0].getStorageIDs().length, 2);
// get blocks of size 1 from dataNodes[0]
locs = namenode.getBlocks(dataNodes[0], 1).getBlocks();
assertEquals(locs.length, 1);
assertEquals(locs[0].getStorageIDs().length, 2);
// get blocks of size 0 from dataNodes[0]
getBlocksWithException(namenode, dataNodes[0], 0);
// get blocks of size -1 from dataNodes[0]
getBlocksWithException(namenode, dataNodes[0], -1);
      // get blocks from a non-existent datanode
DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4");
getBlocksWithException(namenode, info, 2);
} finally {
cluster.shutdown();
}
}
private void getBlocksWithException(NamenodeProtocol namenode,
DatanodeInfo datanode, long size) throws IOException {
boolean getException = false;
try {
      namenode.getBlocks(datanode, size);
    } catch (RemoteException e) {
      getException = true;
      // accept either IllegalArgumentException or HadoopIllegalArgumentException
      assertTrue(e.getClassName().contains("IllegalArgumentException"));
}
assertTrue(getException);
}
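  /**
   * Verify that {@link Block} lookups in a HashMap ignore the generation
   * stamp: an entry stored with a real generation stamp is still found when
   * looked up with GRANDFATHER_GENERATION_STAMP.
   */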
@Test
public void testBlockKey() {
Map<Block, Long> map = new HashMap<Block, Long>();
final Random RAN = new Random();
final long seed = RAN.nextLong();
System.out.println("seed=" + seed);
RAN.setSeed(seed);
long[] blkids = new long[10];
for (int i = 0; i < blkids.length; i++) {
blkids[i] = 1000L + RAN.nextInt(100000);
map.put(new Block(blkids[i], 0, blkids[i]), blkids[i]);
}
System.out.println("map=" + map.toString().replace(",", "\n "));
for (int i = 0; i < blkids.length; i++) {
Block b = new Block(blkids[i], 0,
HdfsConstants.GRANDFATHER_GENERATION_STAMP);
Long v = map.get(b);
System.out.println(b + " => " + v);
assertEquals(blkids[i], v.longValue());
}
}
}
| 11,883 | 37.963934 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.RandomAccessFile;
import java.io.StringReader;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FSTestWrapper;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestWrapper;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FileSystemTestWrapper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;
import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension.DelegationTokenExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyShort;
import static org.mockito.Mockito.withSettings;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyString;
import static org.apache.hadoop.hdfs.DFSTestUtil.verifyFilesEqual;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.xml.sax.InputSource;
import org.xml.sax.helpers.DefaultHandler;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
public class TestEncryptionZones {
private Configuration conf;
private FileSystemTestHelper fsHelper;
private MiniDFSCluster cluster;
protected HdfsAdmin dfsAdmin;
protected DistributedFileSystem fs;
private File testRootDir;
protected final String TEST_KEY = "test_key";
private static final String NS_METRICS = "FSNamesystem";
protected FileSystemTestWrapper fsWrapper;
protected FileContextTestWrapper fcWrapper;
protected String getKeyProviderURI() {
return JavaKeyStoreProvider.SCHEME_NAME + "://file" +
new Path(testRootDir.toString(), "test.jks").toUri();
}
@Before
public void setup() throws Exception {
conf = new HdfsConfiguration();
fsHelper = new FileSystemTestHelper();
// Set up java key store
String testRoot = fsHelper.getTestRootDir();
testRootDir = new File(testRoot).getAbsoluteFile();
conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, getKeyProviderURI());
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
// Lower the batch size for testing
conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
2);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
fs = cluster.getFileSystem();
fsWrapper = new FileSystemTestWrapper(fs);
fcWrapper = new FileContextTestWrapper(
FileContext.getFileContext(cluster.getURI(), conf));
dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
setProvider();
// Create a test key
DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
protected void setProvider() {
// Need to set the client's KeyProvider to the NN's for JKS,
// else the updates do not get flushed properly
fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem()
.getProvider());
}
@After
public void teardown() {
if (cluster != null) {
cluster.shutdown();
}
EncryptionFaultInjector.instance = new EncryptionFaultInjector();
}
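  /** Assert that listEncryptionZones returns exactly the given number of zones. */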
public void assertNumZones(final int numZones) throws IOException {
RemoteIterator<EncryptionZone> it = dfsAdmin.listEncryptionZones();
int count = 0;
while (it.hasNext()) {
count++;
it.next();
}
assertEquals("Unexpected number of encryption zones!", numZones, count);
}
/**
* Checks that an encryption zone with the specified keyName and path (if not
* null) is present.
*
   * @throws IOException if listing the encryption zones fails
*/
public void assertZonePresent(String keyName, String path) throws IOException {
final RemoteIterator<EncryptionZone> it = dfsAdmin.listEncryptionZones();
boolean match = false;
while (it.hasNext()) {
EncryptionZone zone = it.next();
boolean matchKey = (keyName == null);
boolean matchPath = (path == null);
if (keyName != null && zone.getKeyName().equals(keyName)) {
matchKey = true;
}
if (path != null && zone.getPath().equals(path)) {
matchPath = true;
}
if (matchKey && matchPath) {
match = true;
break;
}
}
assertTrue("Did not find expected encryption zone with keyName " + keyName +
" path " + path, match
);
}
@Test(timeout = 60000)
public void testBasicOperations() throws Exception {
int numZones = 0;
/* Test failure of create EZ on a directory that doesn't exist. */
final Path zoneParent = new Path("/zones");
final Path zone1 = new Path(zoneParent, "zone1");
try {
dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
fail("expected /test doesn't exist");
} catch (IOException e) {
assertExceptionContains("cannot find", e);
}
/* Normal creation of an EZ */
fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
assertNumZones(++numZones);
assertZonePresent(null, zone1.toString());
/* Test failure of create EZ on a directory which is already an EZ. */
try {
dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
} catch (IOException e) {
assertExceptionContains("already in an encryption zone", e);
}
/* Test failure of create EZ operation in an existing EZ. */
final Path zone1Child = new Path(zone1, "child");
fsWrapper.mkdir(zone1Child, FsPermission.getDirDefault(), false);
try {
dfsAdmin.createEncryptionZone(zone1Child, TEST_KEY);
fail("EZ in an EZ");
} catch (IOException e) {
assertExceptionContains("already in an encryption zone", e);
}
    /* creating an EZ on the parent of an EZ should fail */
try {
dfsAdmin.createEncryptionZone(zoneParent, TEST_KEY);
fail("EZ over an EZ");
} catch (IOException e) {
assertExceptionContains("encryption zone for a non-empty directory", e);
}
    /* creating an EZ on a non-empty directory (containing a subdirectory) fails */
final Path notEmpty = new Path("/notEmpty");
final Path notEmptyChild = new Path(notEmpty, "child");
fsWrapper.mkdir(notEmptyChild, FsPermission.getDirDefault(), true);
try {
dfsAdmin.createEncryptionZone(notEmpty, TEST_KEY);
fail("Created EZ on an non-empty directory with folder");
} catch (IOException e) {
assertExceptionContains("create an encryption zone", e);
}
fsWrapper.delete(notEmptyChild, false);
    /* creating an EZ on a non-empty directory (containing a file) fails */
fsWrapper.createFile(notEmptyChild);
try {
dfsAdmin.createEncryptionZone(notEmpty, TEST_KEY);
fail("Created EZ on an non-empty directory with file");
} catch (IOException e) {
assertExceptionContains("create an encryption zone", e);
}
/* Test failure of create EZ on a file. */
try {
dfsAdmin.createEncryptionZone(notEmptyChild, TEST_KEY);
fail("Created EZ on a file");
} catch (IOException e) {
assertExceptionContains("create an encryption zone for a file.", e);
}
/* Test failure of creating an EZ passing a key that doesn't exist. */
final Path zone2 = new Path("/zone2");
fsWrapper.mkdir(zone2, FsPermission.getDirDefault(), false);
final String myKeyName = "mykeyname";
try {
dfsAdmin.createEncryptionZone(zone2, myKeyName);
fail("expected key doesn't exist");
} catch (IOException e) {
assertExceptionContains("doesn't exist.", e);
}
/* Test failure of empty and null key name */
try {
dfsAdmin.createEncryptionZone(zone2, "");
fail("created a zone with empty key name");
} catch (IOException e) {
assertExceptionContains("Must specify a key name when creating", e);
}
try {
dfsAdmin.createEncryptionZone(zone2, null);
fail("created a zone with null key name");
} catch (IOException e) {
assertExceptionContains("Must specify a key name when creating", e);
}
assertNumZones(1);
    /* Test success of creating an EZ when the key exists. */
DFSTestUtil.createKey(myKeyName, cluster, conf);
dfsAdmin.createEncryptionZone(zone2, myKeyName);
assertNumZones(++numZones);
assertZonePresent(myKeyName, zone2.toString());
/* Test failure of create encryption zones as a non super user. */
final UserGroupInformation user = UserGroupInformation.
createUserForTesting("user", new String[] { "mygroup" });
final Path nonSuper = new Path("/nonSuper");
fsWrapper.mkdir(nonSuper, FsPermission.getDirDefault(), false);
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final HdfsAdmin userAdmin =
new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
try {
userAdmin.createEncryptionZone(nonSuper, TEST_KEY);
fail("createEncryptionZone is superuser-only operation");
} catch (AccessControlException e) {
assertExceptionContains("Superuser privilege is required", e);
}
return null;
}
});
// Test success of creating an encryption zone a few levels down.
Path deepZone = new Path("/d/e/e/p/zone");
fsWrapper.mkdir(deepZone, FsPermission.getDirDefault(), true);
dfsAdmin.createEncryptionZone(deepZone, TEST_KEY);
assertNumZones(++numZones);
assertZonePresent(null, deepZone.toString());
// Create and list some zones to test batching of listEZ
for (int i=1; i<6; i++) {
final Path zonePath = new Path("/listZone" + i);
fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), false);
dfsAdmin.createEncryptionZone(zonePath, TEST_KEY);
numZones++;
assertNumZones(numZones);
assertZonePresent(null, zonePath.toString());
}
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.restartNameNode(true);
assertNumZones(numZones);
assertEquals("Unexpected number of encryption zones!", numZones, cluster
.getNamesystem().getNumEncryptionZones());
assertGauge("NumEncryptionZones", numZones, getMetrics(NS_METRICS));
assertZonePresent(null, zone1.toString());
// Verify newly added ez is present after restarting the NameNode
// without persisting the namespace.
Path nonpersistZone = new Path("/nonpersistZone");
fsWrapper.mkdir(nonpersistZone, FsPermission.getDirDefault(), false);
dfsAdmin.createEncryptionZone(nonpersistZone, TEST_KEY);
numZones++;
cluster.restartNameNode(true);
assertNumZones(numZones);
assertZonePresent(null, nonpersistZone.toString());
}
/**
* Test listing encryption zones as a non super user.
*/
@Test(timeout = 60000)
public void testListEncryptionZonesAsNonSuperUser() throws Exception {
final UserGroupInformation user = UserGroupInformation.
createUserForTesting("user", new String[] { "mygroup" });
final Path testRoot = new Path("/tmp/TestEncryptionZones");
final Path superPath = new Path(testRoot, "superuseronly");
final Path allPath = new Path(testRoot, "accessall");
fsWrapper.mkdir(superPath, new FsPermission((short) 0700), true);
dfsAdmin.createEncryptionZone(superPath, TEST_KEY);
fsWrapper.mkdir(allPath, new FsPermission((short) 0707), true);
dfsAdmin.createEncryptionZone(allPath, TEST_KEY);
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final HdfsAdmin userAdmin =
new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
try {
userAdmin.listEncryptionZones();
} catch (AccessControlException e) {
assertExceptionContains("Superuser privilege is required", e);
}
return null;
}
});
}
/**
* Test getEncryptionZoneForPath as a non super user.
*/
@Test(timeout = 60000)
public void testGetEZAsNonSuperUser() throws Exception {
final UserGroupInformation user = UserGroupInformation.
createUserForTesting("user", new String[] { "mygroup" });
final Path testRoot = new Path("/tmp/TestEncryptionZones");
final Path superPath = new Path(testRoot, "superuseronly");
final Path superPathFile = new Path(superPath, "file1");
final Path allPath = new Path(testRoot, "accessall");
final Path allPathFile = new Path(allPath, "file1");
final Path nonEZDir = new Path(testRoot, "nonEZDir");
final Path nonEZFile = new Path(nonEZDir, "file1");
final Path nonexistent = new Path("/nonexistent");
final int len = 8192;
fsWrapper.mkdir(testRoot, new FsPermission((short) 0777), true);
fsWrapper.mkdir(superPath, new FsPermission((short) 0700), false);
fsWrapper.mkdir(allPath, new FsPermission((short) 0777), false);
fsWrapper.mkdir(nonEZDir, new FsPermission((short) 0777), false);
dfsAdmin.createEncryptionZone(superPath, TEST_KEY);
dfsAdmin.createEncryptionZone(allPath, TEST_KEY);
dfsAdmin.allowSnapshot(new Path("/"));
final Path newSnap = fs.createSnapshot(new Path("/"));
DFSTestUtil.createFile(fs, superPathFile, len, (short) 1, 0xFEED);
DFSTestUtil.createFile(fs, allPathFile, len, (short) 1, 0xFEED);
DFSTestUtil.createFile(fs, nonEZFile, len, (short) 1, 0xFEED);
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final HdfsAdmin userAdmin =
new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
// Check null arg
try {
userAdmin.getEncryptionZoneForPath(null);
fail("should have thrown NPE");
} catch (NullPointerException e) {
/*
             * It would be nice if we could use assertExceptionContains, but
             * the NPE that is thrown has no message text.
*/
}
// Check operation with accessible paths
assertEquals("expected ez path", allPath.toString(),
userAdmin.getEncryptionZoneForPath(allPath).getPath().
toString());
assertEquals("expected ez path", allPath.toString(),
userAdmin.getEncryptionZoneForPath(allPathFile).getPath().
toString());
// Check operation with inaccessible (lack of permissions) path
try {
userAdmin.getEncryptionZoneForPath(superPathFile);
fail("expected AccessControlException");
} catch (AccessControlException e) {
assertExceptionContains("Permission denied:", e);
}
assertNull("expected null for nonexistent path",
userAdmin.getEncryptionZoneForPath(nonexistent));
// Check operation with non-ez paths
assertNull("expected null for non-ez path",
userAdmin.getEncryptionZoneForPath(nonEZDir));
assertNull("expected null for non-ez path",
userAdmin.getEncryptionZoneForPath(nonEZFile));
// Check operation with snapshots
String snapshottedAllPath = newSnap.toString() + allPath.toString();
assertEquals("expected ez path", allPath.toString(),
userAdmin.getEncryptionZoneForPath(
new Path(snapshottedAllPath)).getPath().toString());
/*
* Delete the file from the non-snapshot and test that it is still ok
* in the ez.
*/
fs.delete(allPathFile, false);
assertEquals("expected ez path", allPath.toString(),
userAdmin.getEncryptionZoneForPath(
new Path(snapshottedAllPath)).getPath().toString());
// Delete the ez and make sure ss's ez is still ok.
fs.delete(allPath, true);
assertEquals("expected ez path", allPath.toString(),
userAdmin.getEncryptionZoneForPath(
new Path(snapshottedAllPath)).getPath().toString());
assertNull("expected null for deleted file path",
userAdmin.getEncryptionZoneForPath(allPathFile));
assertNull("expected null for deleted directory path",
userAdmin.getEncryptionZoneForPath(allPath));
return null;
}
});
}
/**
* Test success of Rename EZ on a directory which is already an EZ.
*/
private void doRenameEncryptionZone(FSTestWrapper wrapper) throws Exception {
final Path testRoot = new Path("/tmp/TestEncryptionZones");
final Path pathFoo = new Path(testRoot, "foo");
final Path pathFooBaz = new Path(pathFoo, "baz");
final Path pathFooBazFile = new Path(pathFooBaz, "file");
final Path pathFooBar = new Path(pathFoo, "bar");
final Path pathFooBarFile = new Path(pathFooBar, "file");
final int len = 8192;
wrapper.mkdir(pathFoo, FsPermission.getDirDefault(), true);
dfsAdmin.createEncryptionZone(pathFoo, TEST_KEY);
wrapper.mkdir(pathFooBaz, FsPermission.getDirDefault(), true);
DFSTestUtil.createFile(fs, pathFooBazFile, len, (short) 1, 0xFEED);
String contents = DFSTestUtil.readFile(fs, pathFooBazFile);
try {
wrapper.rename(pathFooBaz, testRoot);
} catch (IOException e) {
      assertExceptionContains(pathFooBaz.toString() +
          " can't be moved from an encryption zone.", e);
}
// Verify that we can rename dir and files within an encryption zone.
assertTrue(fs.rename(pathFooBaz, pathFooBar));
assertTrue("Rename of dir and file within ez failed",
!wrapper.exists(pathFooBaz) && wrapper.exists(pathFooBar));
assertEquals("Renamed file contents not the same",
contents, DFSTestUtil.readFile(fs, pathFooBarFile));
// Verify that we can rename an EZ root
final Path newFoo = new Path(testRoot, "newfoo");
assertTrue("Rename of EZ root", fs.rename(pathFoo, newFoo));
assertTrue("Rename of EZ root failed",
!wrapper.exists(pathFoo) && wrapper.exists(newFoo));
// Verify that we can't rename an EZ root onto itself
try {
wrapper.rename(newFoo, newFoo);
} catch (IOException e) {
assertExceptionContains("are the same", e);
}
}
@Test(timeout = 60000)
public void testRenameFileSystem() throws Exception {
doRenameEncryptionZone(fsWrapper);
}
@Test(timeout = 60000)
public void testRenameFileContext() throws Exception {
doRenameEncryptionZone(fcWrapper);
}
private FileEncryptionInfo getFileEncryptionInfo(Path path) throws Exception {
LocatedBlocks blocks = fs.getClient().getLocatedBlocks(path.toString(), 0);
return blocks.getFileEncryptionInfo();
}
@Test(timeout = 120000)
public void testReadWrite() throws Exception {
final HdfsAdmin dfsAdmin =
new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
// Create a base file for comparison
final Path baseFile = new Path("/base");
final int len = 8192;
DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED);
// Create the first enc file
final Path zone = new Path("/zone");
fs.mkdirs(zone);
dfsAdmin.createEncryptionZone(zone, TEST_KEY);
final Path encFile1 = new Path(zone, "myfile");
DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
// Read them back in and compare byte-by-byte
verifyFilesEqual(fs, baseFile, encFile1, len);
// Roll the key of the encryption zone
assertNumZones(1);
String keyName = dfsAdmin.listEncryptionZones().next().getKeyName();
cluster.getNamesystem().getProvider().rollNewVersion(keyName);
// Read them back in and compare byte-by-byte
verifyFilesEqual(fs, baseFile, encFile1, len);
// Write a new enc file and validate
final Path encFile2 = new Path(zone, "myfile2");
DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
// FEInfos should be different
FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
assertFalse("EDEKs should be different", Arrays
.equals(feInfo1.getEncryptedDataEncryptionKey(),
feInfo2.getEncryptedDataEncryptionKey()));
assertNotEquals("Key was rolled, versions should be different",
feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName());
// Contents still equal
verifyFilesEqual(fs, encFile1, encFile2, len);
}
@Test(timeout = 120000)
public void testReadWriteUsingWebHdfs() throws Exception {
final HdfsAdmin dfsAdmin =
new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
final FileSystem webHdfsFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsConstants.WEBHDFS_SCHEME);
final Path zone = new Path("/zone");
fs.mkdirs(zone);
dfsAdmin.createEncryptionZone(zone, TEST_KEY);
/* Create an unencrypted file for comparison purposes. */
final Path unencFile = new Path("/unenc");
final int len = 8192;
DFSTestUtil.createFile(webHdfsFs, unencFile, len, (short) 1, 0xFEED);
/*
* Create the same file via webhdfs, but this time encrypted. Compare it
* using both webhdfs and DFS.
*/
final Path encFile1 = new Path(zone, "myfile");
DFSTestUtil.createFile(webHdfsFs, encFile1, len, (short) 1, 0xFEED);
verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
verifyFilesEqual(fs, unencFile, encFile1, len);
/*
* Same thing except this time create the encrypted file using DFS.
*/
final Path encFile2 = new Path(zone, "myfile2");
DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
verifyFilesEqual(fs, unencFile, encFile2, len);
/* Verify appending to files works correctly. */
appendOneByte(fs, unencFile);
appendOneByte(webHdfsFs, encFile1);
appendOneByte(fs, encFile2);
verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
verifyFilesEqual(fs, unencFile, encFile1, len);
verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
verifyFilesEqual(fs, unencFile, encFile2, len);
}
private void appendOneByte(FileSystem fs, Path p) throws IOException {
final FSDataOutputStream out = fs.append(p);
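    // Note: (byte) 0x123 truncates to 0x23, and write(int) only emits the
    // low-order eight bits, so a single byte with value 0x23 is appended here.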
out.write((byte) 0x123);
out.close();
}
@Test(timeout = 60000)
public void testVersionAndSuiteNegotiation() throws Exception {
final HdfsAdmin dfsAdmin =
new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
final Path zone = new Path("/zone");
fs.mkdirs(zone);
dfsAdmin.createEncryptionZone(zone, TEST_KEY);
// Create a file in an EZ, which should succeed
DFSTestUtil
.createFile(fs, new Path(zone, "success1"), 0, (short) 1, 0xFEED);
// Pass no supported versions, fail
DFSOutputStream.SUPPORTED_CRYPTO_VERSIONS = new CryptoProtocolVersion[] {};
try {
DFSTestUtil.createFile(fs, new Path(zone, "fail"), 0, (short) 1, 0xFEED);
fail("Created a file without specifying a crypto protocol version");
} catch (UnknownCryptoProtocolVersionException e) {
assertExceptionContains("No crypto protocol versions", e);
}
// Pass some unknown versions, fail
DFSOutputStream.SUPPORTED_CRYPTO_VERSIONS = new CryptoProtocolVersion[]
{ CryptoProtocolVersion.UNKNOWN, CryptoProtocolVersion.UNKNOWN };
try {
DFSTestUtil.createFile(fs, new Path(zone, "fail"), 0, (short) 1, 0xFEED);
fail("Created a file without specifying a known crypto protocol version");
} catch (UnknownCryptoProtocolVersionException e) {
assertExceptionContains("No crypto protocol versions", e);
}
// Pass some unknown and a good cipherSuites, success
DFSOutputStream.SUPPORTED_CRYPTO_VERSIONS =
new CryptoProtocolVersion[] {
CryptoProtocolVersion.UNKNOWN,
CryptoProtocolVersion.UNKNOWN,
CryptoProtocolVersion.ENCRYPTION_ZONES };
DFSTestUtil
.createFile(fs, new Path(zone, "success2"), 0, (short) 1, 0xFEED);
DFSOutputStream.SUPPORTED_CRYPTO_VERSIONS =
new CryptoProtocolVersion[] {
CryptoProtocolVersion.ENCRYPTION_ZONES,
CryptoProtocolVersion.UNKNOWN,
            CryptoProtocolVersion.UNKNOWN };
DFSTestUtil
.createFile(fs, new Path(zone, "success3"), 4096, (short) 1, 0xFEED);
// Check KeyProvider state
// Flushing the KP on the NN, since it caches, and init a test one
cluster.getNamesystem().getProvider().flush();
KeyProvider provider = KeyProviderFactory
.get(new URI(conf.getTrimmed(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI)),
conf);
List<String> keys = provider.getKeys();
assertEquals("Expected NN to have created one key per zone", 1,
keys.size());
List<KeyProvider.KeyVersion> allVersions = Lists.newArrayList();
for (String key : keys) {
List<KeyProvider.KeyVersion> versions = provider.getKeyVersions(key);
assertEquals("Should only have one key version per key", 1,
versions.size());
allVersions.addAll(versions);
}
// Check that the specified CipherSuite was correctly saved on the NN
for (int i = 2; i <= 3; i++) {
FileEncryptionInfo feInfo =
getFileEncryptionInfo(new Path(zone.toString() +
"/success" + i));
assertEquals(feInfo.getCipherSuite(), CipherSuite.AES_CTR_NOPADDING);
}
DFSClient old = fs.dfs;
try {
testCipherSuiteNegotiation(fs, conf);
} finally {
fs.dfs = old;
}
}
@SuppressWarnings("unchecked")
private static void mockCreate(ClientProtocol mcp,
CipherSuite suite, CryptoProtocolVersion version) throws Exception {
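    // Stub create() so that it always returns an HdfsFileStatus whose
    // FileEncryptionInfo carries the given CipherSuite and protocol version,
    // letting client-side negotiation be exercised without a real NameNode.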
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0, new FileEncryptionInfo(suite,
version, new byte[suite.getAlgorithmBlockSize()],
new byte[suite.getAlgorithmBlockSize()],
"fakeKey", "fakeVersion"),
(byte) 0))
.when(mcp)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
}
// This test only uses mocks. Called from the end of an existing test to
// avoid an extra mini cluster.
private static void testCipherSuiteNegotiation(DistributedFileSystem fs,
Configuration conf) throws Exception {
// Set up mock ClientProtocol to test client-side CipherSuite negotiation
final ClientProtocol mcp = Mockito.mock(ClientProtocol.class);
// Try with an empty conf
final Configuration noCodecConf = new Configuration(conf);
final CipherSuite suite = CipherSuite.AES_CTR_NOPADDING;
final String confKey = CommonConfigurationKeysPublic
.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX + suite
.getConfigSuffix();
noCodecConf.set(confKey, "");
fs.dfs = new DFSClient(null, mcp, noCodecConf, null);
mockCreate(mcp, suite, CryptoProtocolVersion.ENCRYPTION_ZONES);
try {
fs.create(new Path("/mock"));
fail("Created with no configured codecs!");
} catch (UnknownCipherSuiteException e) {
assertExceptionContains("No configuration found for the cipher", e);
}
// Try create with an UNKNOWN CipherSuite
fs.dfs = new DFSClient(null, mcp, conf, null);
CipherSuite unknown = CipherSuite.UNKNOWN;
unknown.setUnknownValue(989);
mockCreate(mcp, unknown, CryptoProtocolVersion.ENCRYPTION_ZONES);
try {
fs.create(new Path("/mock"));
fail("Created with unknown cipher!");
} catch (IOException e) {
assertExceptionContains("unknown CipherSuite with ID 989", e);
}
}
@Test(timeout = 120000)
public void testCreateEZWithNoProvider() throws Exception {
// Unset the key provider and make sure EZ ops don't work
final Configuration clusterConf = cluster.getConfiguration(0);
clusterConf.unset(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI);
cluster.restartNameNode(true);
cluster.waitActive();
final Path zone1 = new Path("/zone1");
fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
try {
dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
fail("expected exception");
} catch (IOException e) {
assertExceptionContains("since no key provider is available", e);
}
final Path jksPath = new Path(testRootDir.toString(), "test.jks");
clusterConf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
);
// Try listing EZs as well
assertNumZones(0);
}
@Test(timeout = 120000)
public void testIsEncryptedMethod() throws Exception {
doTestIsEncryptedMethod(new Path("/"));
doTestIsEncryptedMethod(new Path("/.reserved/raw"));
}
private void doTestIsEncryptedMethod(Path prefix) throws Exception {
try {
dTIEM(prefix);
} finally {
for (FileStatus s : fsWrapper.listStatus(prefix)) {
fsWrapper.delete(s.getPath(), true);
}
}
}
private void dTIEM(Path prefix) throws Exception {
final HdfsAdmin dfsAdmin =
new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
// Create an unencrypted file to check isEncrypted returns false
final Path baseFile = new Path(prefix, "base");
fsWrapper.createFile(baseFile);
FileStatus stat = fsWrapper.getFileStatus(baseFile);
assertFalse("Expected isEncrypted to return false for " + baseFile,
stat.isEncrypted());
// Create an encrypted file to check isEncrypted returns true
final Path zone = new Path(prefix, "zone");
fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
dfsAdmin.createEncryptionZone(zone, TEST_KEY);
final Path encFile = new Path(zone, "encfile");
fsWrapper.createFile(encFile);
stat = fsWrapper.getFileStatus(encFile);
assertTrue("Expected isEncrypted to return true for enc file" + encFile,
stat.isEncrypted());
// check that it returns true for an ez root
stat = fsWrapper.getFileStatus(zone);
assertTrue("Expected isEncrypted to return true for ezroot",
stat.isEncrypted());
// check that it returns true for a dir in the ez
final Path zoneSubdir = new Path(zone, "subdir");
fsWrapper.mkdir(zoneSubdir, FsPermission.getDirDefault(), true);
stat = fsWrapper.getFileStatus(zoneSubdir);
assertTrue(
"Expected isEncrypted to return true for ez subdir " + zoneSubdir,
stat.isEncrypted());
// check that it returns false for a non ez dir
final Path nonEzDirPath = new Path(prefix, "nonzone");
fsWrapper.mkdir(nonEzDirPath, FsPermission.getDirDefault(), true);
stat = fsWrapper.getFileStatus(nonEzDirPath);
assertFalse(
"Expected isEncrypted to return false for directory " + nonEzDirPath,
stat.isEncrypted());
// check that it returns true for listings within an ez
FileStatus[] statuses = fsWrapper.listStatus(zone);
for (FileStatus s : statuses) {
assertTrue("Expected isEncrypted to return true for ez stat " + zone,
s.isEncrypted());
}
statuses = fsWrapper.listStatus(encFile);
for (FileStatus s : statuses) {
assertTrue(
"Expected isEncrypted to return true for ez file stat " + encFile,
s.isEncrypted());
}
// check that it returns false for listings outside an ez
statuses = fsWrapper.listStatus(nonEzDirPath);
for (FileStatus s : statuses) {
assertFalse(
"Expected isEncrypted to return false for nonez stat " + nonEzDirPath,
s.isEncrypted());
}
statuses = fsWrapper.listStatus(baseFile);
for (FileStatus s : statuses) {
assertFalse(
"Expected isEncrypted to return false for non ez stat " + baseFile,
s.isEncrypted());
}
}
private class MyInjector extends EncryptionFaultInjector {
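    // Latch handshake: startFileAfterGenerateKey() counts down `ready` once an
    // EDEK has been generated, then blocks on `wait` until the test thread has
    // injected its fault and released it (see InjectFaultTask below).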
int generateCount;
CountDownLatch ready;
CountDownLatch wait;
public MyInjector() {
this.ready = new CountDownLatch(1);
this.wait = new CountDownLatch(1);
}
@Override
public void startFileAfterGenerateKey() throws IOException {
ready.countDown();
try {
wait.await();
} catch (InterruptedException e) {
throw new IOException(e);
}
generateCount++;
}
}
private class CreateFileTask implements Callable<Void> {
private FileSystemTestWrapper fsWrapper;
private Path name;
CreateFileTask(FileSystemTestWrapper fsWrapper, Path name) {
this.fsWrapper = fsWrapper;
this.name = name;
}
@Override
public Void call() throws Exception {
fsWrapper.createFile(name);
return null;
}
}
private class InjectFaultTask implements Callable<Void> {
final Path zone1 = new Path("/zone1");
final Path file = new Path(zone1, "file1");
final ExecutorService executor = Executors.newSingleThreadExecutor();
MyInjector injector;
@Override
public Void call() throws Exception {
// Set up the injector
injector = new MyInjector();
EncryptionFaultInjector.instance = injector;
Future<Void> future =
executor.submit(new CreateFileTask(fsWrapper, file));
injector.ready.await();
// Do the fault
doFault();
// Allow create to proceed
injector.wait.countDown();
future.get();
// Cleanup and postconditions
doCleanup();
return null;
}
public void doFault() throws Exception {}
public void doCleanup() throws Exception {}
}
/**
* Tests the retry logic in startFile. We release the lock while generating
* an EDEK, so tricky things can happen in the intervening time.
*/
@Test(timeout = 120000)
public void testStartFileRetry() throws Exception {
final Path zone1 = new Path("/zone1");
final Path file = new Path(zone1, "file1");
fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
ExecutorService executor = Executors.newSingleThreadExecutor();
// Test when the parent directory becomes an EZ
executor.submit(new InjectFaultTask() {
@Override
public void doFault() throws Exception {
dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
}
@Override
public void doCleanup() throws Exception {
assertEquals("Expected a startFile retry", 2, injector.generateCount);
fsWrapper.delete(file, false);
}
}).get();
// Test when the parent directory unbecomes an EZ
executor.submit(new InjectFaultTask() {
@Override
public void doFault() throws Exception {
fsWrapper.delete(zone1, true);
}
@Override
public void doCleanup() throws Exception {
assertEquals("Expected no startFile retries", 1, injector.generateCount);
fsWrapper.delete(file, false);
}
}).get();
// Test when the parent directory becomes a different EZ
fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
final String otherKey = "other_key";
DFSTestUtil.createKey(otherKey, cluster, conf);
dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
executor.submit(new InjectFaultTask() {
@Override
public void doFault() throws Exception {
fsWrapper.delete(zone1, true);
fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
dfsAdmin.createEncryptionZone(zone1, otherKey);
}
@Override
public void doCleanup() throws Exception {
assertEquals("Expected a startFile retry", 2, injector.generateCount);
fsWrapper.delete(zone1, true);
}
}).get();
// Test that the retry limit leads to an error
fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
final String anotherKey = "another_key";
DFSTestUtil.createKey(anotherKey, cluster, conf);
dfsAdmin.createEncryptionZone(zone1, anotherKey);
String keyToUse = otherKey;
MyInjector injector = new MyInjector();
EncryptionFaultInjector.instance = injector;
Future<?> future = executor.submit(new CreateFileTask(fsWrapper, file));
// Flip-flop between two EZs to repeatedly fail
for (int i=0; i<DFSOutputStream.CREATE_RETRY_COUNT+1; i++) {
injector.ready.await();
fsWrapper.delete(zone1, true);
fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
dfsAdmin.createEncryptionZone(zone1, keyToUse);
if (keyToUse == otherKey) {
keyToUse = anotherKey;
} else {
keyToUse = otherKey;
}
injector.wait.countDown();
injector = new MyInjector();
EncryptionFaultInjector.instance = injector;
}
try {
future.get();
fail("Expected exception from too many retries");
} catch (ExecutionException e) {
assertExceptionContains(
"Too many retries because of encryption zone operations",
e.getCause());
}
}
/**
   * Tests obtaining a delegation token from a stored key.
*/
@Test(timeout = 120000)
public void testDelegationToken() throws Exception {
UserGroupInformation.createRemoteUser("JobTracker");
DistributedFileSystem dfs = cluster.getFileSystem();
KeyProvider keyProvider = Mockito.mock(KeyProvider.class,
withSettings().extraInterfaces(
DelegationTokenExtension.class,
CryptoExtension.class));
Mockito.when(keyProvider.getConf()).thenReturn(conf);
byte[] testIdentifier = "Test identifier for delegation token".getBytes();
Token<?> testToken = new Token(testIdentifier, new byte[0],
new Text(), new Text());
Mockito.when(((DelegationTokenExtension)keyProvider).
addDelegationTokens(anyString(), (Credentials)any())).
thenReturn(new Token<?>[] { testToken });
dfs.getClient().setKeyProvider(keyProvider);
Credentials creds = new Credentials();
final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
DistributedFileSystem.LOG.debug("Delegation tokens: " +
Arrays.asList(tokens));
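    // Two tokens are expected: the HDFS delegation token itself plus the token
    // returned by the mocked KeyProvider's addDelegationTokens().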
Assert.assertEquals(2, tokens.length);
Assert.assertEquals(tokens[1], testToken);
Assert.assertEquals(1, creds.numberOfTokens());
}
/**
* Test running fsck on a system with encryption zones.
*/
@Test(timeout = 60000)
public void testFsckOnEncryptionZones() throws Exception {
final int len = 8196;
final Path zoneParent = new Path("/zones");
final Path zone1 = new Path(zoneParent, "zone1");
final Path zone1File = new Path(zone1, "file");
fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
DFSTestUtil.createFile(fs, zone1File, len, (short) 1, 0xFEED);
ByteArrayOutputStream bStream = new ByteArrayOutputStream();
PrintStream out = new PrintStream(bStream, true);
int errCode = ToolRunner.run(new DFSck(conf, out),
new String[]{ "/" });
assertEquals("Fsck ran with non-zero error code", 0, errCode);
String result = bStream.toString();
assertTrue("Fsck did not return HEALTHY status",
result.contains(NamenodeFsck.HEALTHY_STATUS));
// Run fsck directly on the encryption zone instead of root
errCode = ToolRunner.run(new DFSck(conf, out),
new String[]{ zoneParent.toString() });
assertEquals("Fsck ran with non-zero error code", 0, errCode);
result = bStream.toString();
assertTrue("Fsck did not return HEALTHY status",
result.contains(NamenodeFsck.HEALTHY_STATUS));
}
/**
* Test correctness of successive snapshot creation and deletion
* on a system with encryption zones.
*/
@Test(timeout = 60000)
public void testSnapshotsOnEncryptionZones() throws Exception {
final String TEST_KEY2 = "testkey2";
DFSTestUtil.createKey(TEST_KEY2, cluster, conf);
final int len = 8196;
final Path zoneParent = new Path("/zones");
final Path zone = new Path(zoneParent, "zone");
final Path zoneFile = new Path(zone, "zoneFile");
fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
dfsAdmin.allowSnapshot(zoneParent);
dfsAdmin.createEncryptionZone(zone, TEST_KEY);
DFSTestUtil.createFile(fs, zoneFile, len, (short) 1, 0xFEED);
String contents = DFSTestUtil.readFile(fs, zoneFile);
final Path snap1 = fs.createSnapshot(zoneParent, "snap1");
final Path snap1Zone = new Path(snap1, zone.getName());
assertEquals("Got unexpected ez path", zone.toString(),
dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString());
// Now delete the encryption zone, recreate the dir, and take another
// snapshot
fsWrapper.delete(zone, true);
fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
final Path snap2 = fs.createSnapshot(zoneParent, "snap2");
final Path snap2Zone = new Path(snap2, zone.getName());
assertNull("Expected null ez path",
dfsAdmin.getEncryptionZoneForPath(snap2Zone));
// Create the encryption zone again
dfsAdmin.createEncryptionZone(zone, TEST_KEY2);
final Path snap3 = fs.createSnapshot(zoneParent, "snap3");
final Path snap3Zone = new Path(snap3, zone.getName());
// Check that snap3's EZ has the correct settings
EncryptionZone ezSnap3 = dfsAdmin.getEncryptionZoneForPath(snap3Zone);
assertEquals("Got unexpected ez path", zone.toString(),
ezSnap3.getPath().toString());
assertEquals("Unexpected ez key", TEST_KEY2, ezSnap3.getKeyName());
// Check that older snapshots still have the old EZ settings
EncryptionZone ezSnap1 = dfsAdmin.getEncryptionZoneForPath(snap1Zone);
assertEquals("Got unexpected ez path", zone.toString(),
ezSnap1.getPath().toString());
assertEquals("Unexpected ez key", TEST_KEY, ezSnap1.getKeyName());
// Check that listEZs only shows the current filesystem state
ArrayList<EncryptionZone> listZones = Lists.newArrayList();
RemoteIterator<EncryptionZone> it = dfsAdmin.listEncryptionZones();
while (it.hasNext()) {
listZones.add(it.next());
}
for (EncryptionZone z: listZones) {
System.out.println(z);
}
assertEquals("Did not expect additional encryption zones!", 1,
listZones.size());
EncryptionZone listZone = listZones.get(0);
assertEquals("Got unexpected ez path", zone.toString(),
listZone.getPath().toString());
assertEquals("Unexpected ez key", TEST_KEY2, listZone.getKeyName());
// Verify contents of the snapshotted file
final Path snapshottedZoneFile = new Path(
snap1.toString() + "/" + zone.getName() + "/" + zoneFile.getName());
assertEquals("Contents of snapshotted file have changed unexpectedly",
contents, DFSTestUtil.readFile(fs, snapshottedZoneFile));
// Now delete the snapshots out of order and verify the zones are still
// correct
fs.deleteSnapshot(zoneParent, snap2.getName());
assertEquals("Got unexpected ez path", zone.toString(),
dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString());
assertEquals("Got unexpected ez path", zone.toString(),
dfsAdmin.getEncryptionZoneForPath(snap3Zone).getPath().toString());
fs.deleteSnapshot(zoneParent, snap1.getName());
assertEquals("Got unexpected ez path", zone.toString(),
dfsAdmin.getEncryptionZoneForPath(snap3Zone).getPath().toString());
}
/**
* Verify symlinks can be created in encryption zones and that
* they function properly when the target is in the same
* or different ez.
*/
@Test(timeout = 60000)
public void testEncryptionZonesWithSymlinks() throws Exception {
// Verify we can create an encryption zone over both link and target
final int len = 8192;
final Path parent = new Path("/parent");
final Path linkParent = new Path(parent, "symdir1");
final Path targetParent = new Path(parent, "symdir2");
final Path link = new Path(linkParent, "link");
final Path target = new Path(targetParent, "target");
fs.mkdirs(parent);
dfsAdmin.createEncryptionZone(parent, TEST_KEY);
fs.mkdirs(linkParent);
fs.mkdirs(targetParent);
DFSTestUtil.createFile(fs, target, len, (short)1, 0xFEED);
String content = DFSTestUtil.readFile(fs, target);
fs.createSymlink(target, link, false);
assertEquals("Contents read from link are not the same as target",
content, DFSTestUtil.readFile(fs, link));
fs.delete(parent, true);
// Now let's test when the symlink and target are in different
// encryption zones
fs.mkdirs(linkParent);
fs.mkdirs(targetParent);
dfsAdmin.createEncryptionZone(linkParent, TEST_KEY);
dfsAdmin.createEncryptionZone(targetParent, TEST_KEY);
DFSTestUtil.createFile(fs, target, len, (short)1, 0xFEED);
content = DFSTestUtil.readFile(fs, target);
fs.createSymlink(target, link, false);
assertEquals("Contents read from link are not the same as target",
content, DFSTestUtil.readFile(fs, link));
fs.delete(link, true);
fs.delete(target, true);
}
@Test(timeout = 60000)
public void testConcatFailsInEncryptionZones() throws Exception {
final int len = 8192;
final Path ez = new Path("/ez");
fs.mkdirs(ez);
dfsAdmin.createEncryptionZone(ez, TEST_KEY);
final Path src1 = new Path(ez, "src1");
final Path src2 = new Path(ez, "src2");
final Path target = new Path(ez, "target");
DFSTestUtil.createFile(fs, src1, len, (short)1, 0xFEED);
DFSTestUtil.createFile(fs, src2, len, (short)1, 0xFEED);
DFSTestUtil.createFile(fs, target, len, (short)1, 0xFEED);
try {
fs.concat(target, new Path[] { src1, src2 });
fail("expected concat to throw en exception for files in an ez");
} catch (IOException e) {
assertExceptionContains(
"concat can not be called for files in an encryption zone", e);
}
fs.delete(ez, true);
}
/**
* Test running the OfflineImageViewer on a system with encryption zones.
*/
@Test(timeout = 60000)
public void testOfflineImageViewerOnEncryptionZones() throws Exception {
final int len = 8196;
final Path zoneParent = new Path("/zones");
final Path zone1 = new Path(zoneParent, "zone1");
final Path zone1File = new Path(zone1, "file");
fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
DFSTestUtil.createFile(fs, zone1File, len, (short) 1, 0xFEED);
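    // Enter safe mode so that saveNamespace() is permitted to write a new fsimage.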
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
fs.saveNamespace();
File originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
if (originalFsimage == null) {
throw new RuntimeException("Didn't generate or can't find fsimage");
}
// Run the XML OIV processor
ByteArrayOutputStream output = new ByteArrayOutputStream();
PrintStream pw = new PrintStream(output);
PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), pw);
v.visit(new RandomAccessFile(originalFsimage, "r"));
final String xml = output.toString();
SAXParser parser = SAXParserFactory.newInstance().newSAXParser();
parser.parse(new InputSource(new StringReader(xml)), new DefaultHandler());
}
/**
* Test creating encryption zone on the root path
*/
@Test(timeout = 60000)
public void testEncryptionZonesOnRootPath() throws Exception {
final int len = 8196;
final Path rootDir = new Path("/");
final Path zoneFile = new Path(rootDir, "file");
final Path rawFile = new Path("/.reserved/raw/file");
dfsAdmin.createEncryptionZone(rootDir, TEST_KEY);
DFSTestUtil.createFile(fs, zoneFile, len, (short) 1, 0xFEED);
assertEquals("File can be created on the root encryption zone " +
"with correct length",
len, fs.getFileStatus(zoneFile).getLen());
assertEquals("Root dir is encrypted",
true, fs.getFileStatus(rootDir).isEncrypted());
assertEquals("File is encrypted",
true, fs.getFileStatus(zoneFile).isEncrypted());
DFSTestUtil.verifyFilesNotEqual(fs, zoneFile, rawFile, len);
}
@Test(timeout = 60000)
public void testEncryptionZonesOnRelativePath() throws Exception {
final int len = 8196;
final Path baseDir = new Path("/somewhere/base");
final Path zoneDir = new Path("zone");
final Path zoneFile = new Path("file");
fs.setWorkingDirectory(baseDir);
fs.mkdirs(zoneDir);
dfsAdmin.createEncryptionZone(zoneDir, TEST_KEY);
DFSTestUtil.createFile(fs, zoneFile, len, (short) 1, 0xFEED);
assertNumZones(1);
assertZonePresent(TEST_KEY, "/somewhere/base/zone");
assertEquals("Got unexpected ez path", "/somewhere/base/zone", dfsAdmin
.getEncryptionZoneForPath(zoneDir).getPath().toString());
}
}
| 52,582 | 38.388015 | 97 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/** This is a comprehensive append test that tries
 * all combinations of file length and number of appended bytes.
 * In each iteration, it creates a file of len1, then reopens
 * the file for append. It first appends len2 bytes, calls hflush,
 * appends len3 bytes and closes the file. Afterwards, the content of
 * the file is validated.
 * Len1 ranges over [0, 2*BLOCK_SIZE+1], len2 ranges over [0, BLOCK_SIZE+1],
 * and len3 ranges over [0, BLOCK_SIZE+1].
*
*/
public class FileAppendTest4 {
public static final Log LOG = LogFactory.getLog(FileAppendTest4.class);
private static final int BYTES_PER_CHECKSUM = 4;
private static final int PACKET_SIZE = BYTES_PER_CHECKSUM;
private static final int BLOCK_SIZE = 2*PACKET_SIZE;
private static final short REPLICATION = 3;
private static final int DATANODE_NUM = 5;
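  // Tiny checksum/packet/block sizes keep the exhaustive triple loop in
  // testAppend() fast while still crossing packet and block boundaries.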
private static Configuration conf;
private static MiniDFSCluster cluster;
private static DistributedFileSystem fs;
private static void init(Configuration conf) {
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE);
}
@BeforeClass
public static void startUp () throws IOException {
conf = new HdfsConfiguration();
init(conf);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
fs = cluster.getFileSystem();
}
@AfterClass
public static void tearDown() {
cluster.shutdown();
}
/**
* Comprehensive test for append
* @throws IOException an exception might be thrown
*/
@Test
public void testAppend() throws IOException {
final int maxOldFileLen = 2*BLOCK_SIZE+1;
final int maxFlushedBytes = BLOCK_SIZE;
byte[] contents = AppendTestUtil.initBuffer(
maxOldFileLen+2*maxFlushedBytes);
    for (int oldFileLen = 0; oldFileLen <= maxOldFileLen; oldFileLen++) {
      for (int flushedBytes1 = 0; flushedBytes1 <= maxFlushedBytes;
          flushedBytes1++) {
        for (int flushedBytes2 = 0; flushedBytes2 <= maxFlushedBytes;
            flushedBytes2++) {
final int fileLen = oldFileLen + flushedBytes1 + flushedBytes2;
// create the initial file of oldFileLen
final Path p =
new Path("foo"+ oldFileLen +"_"+ flushedBytes1 +"_"+ flushedBytes2);
LOG.info("Creating file " + p);
FSDataOutputStream out = fs.create(p, false,
conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
REPLICATION, BLOCK_SIZE);
out.write(contents, 0, oldFileLen);
out.close();
// append flushedBytes bytes to the file
out = fs.append(p);
out.write(contents, oldFileLen, flushedBytes1);
out.hflush();
// write another flushedBytes2 bytes to the file
out.write(contents, oldFileLen + flushedBytes1, flushedBytes2);
out.close();
// validate the file content
AppendTestUtil.checkFullFile(fs, p, fileLen, contents, p.toString());
fs.delete(p, false);
}
}
}
}
}
| 4,438 | 37.267241 | 82 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
private MiniDFSCluster cluster;
private String defaultWorkingDirectory;
@Override
protected void setUp() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,
FileSystemContractBaseTest.TEST_UMASK);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
fs = cluster.getFileSystem();
defaultWorkingDirectory = "/user/" +
UserGroupInformation.getCurrentUser().getShortUserName();
}
@Override
protected void tearDown() throws Exception {
super.tearDown();
cluster.shutdown();
cluster = null;
}
@Override
protected String getDefaultWorkingDirectory() {
return defaultWorkingDirectory;
}
public void testAppend() throws IOException {
AppendTestUtil.testAppend(fs, new Path("/testAppend/f"));
}
}
| 2,062 | 32.819672 | 76 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HOSTS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.RandomAccessFile;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.channels.FileChannel;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter;
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.hdfs.web.HftpFileSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.StaticMapping;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
/**
* This class creates a single-process DFS cluster for junit testing.
* The data directories for non-simulated DFS are under the testing directory.
* For simulated data nodes, no underlying fs storage is used.
*/
@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "Hive", "MapReduce", "Pig"})
@InterfaceStability.Unstable
public class MiniDFSCluster {
private static final String NAMESERVICE_ID_PREFIX = "nameserviceId";
private static final Log LOG = LogFactory.getLog(MiniDFSCluster.class);
/** System property to set the data dir: {@value} */
public static final String PROP_TEST_BUILD_DATA = "test.build.data";
/** Configuration option to set the data dir: {@value} */
public static final String HDFS_MINIDFS_BASEDIR = "hdfs.minidfs.basedir";
public static final String DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY
= DFS_NAMENODE_SAFEMODE_EXTENSION_KEY + ".testing";
// Changing this default may break some tests that assume it is 2.
private static final int DEFAULT_STORAGES_PER_DATANODE = 2;
static { DefaultMetricsSystem.setMiniClusterMode(true); }
public int getStoragesPerDatanode() {
return storagesPerDatanode;
}
/**
* Class to construct instances of MiniDFSClusters with specific options.
*/
public static class Builder {
private int nameNodePort = 0;
private int nameNodeHttpPort = 0;
private final Configuration conf;
private int numDataNodes = 1;
private StorageType[][] storageTypes = null;
private StorageType[] storageTypes1D = null;
private int storagesPerDatanode = DEFAULT_STORAGES_PER_DATANODE;
private boolean format = true;
private boolean manageNameDfsDirs = true;
private boolean manageNameDfsSharedDirs = true;
private boolean enableManagedDfsDirsRedundancy = true;
private boolean manageDataDfsDirs = true;
private StartupOption option = null;
private StartupOption dnOption = null;
private String[] racks = null;
private String [] hosts = null;
private long [] simulatedCapacities = null;
private long [][] storageCapacities = null;
private long [] storageCapacities1D = null;
private String clusterId = null;
private boolean waitSafeMode = true;
private boolean setupHostsFile = false;
private MiniDFSNNTopology nnTopology = null;
private boolean checkExitOnShutdown = true;
private boolean checkDataNodeAddrConfig = false;
private boolean checkDataNodeHostConfig = false;
private Configuration[] dnConfOverlays;
private boolean skipFsyncForTesting = true;
public Builder(Configuration conf) {
this.conf = conf;
}
/**
* Default: 0
*/
public Builder nameNodePort(int val) {
this.nameNodePort = val;
return this;
}
/**
* Default: 0
*/
public Builder nameNodeHttpPort(int val) {
this.nameNodeHttpPort = val;
return this;
}
/**
* Default: 1
*/
public Builder numDataNodes(int val) {
this.numDataNodes = val;
return this;
}
/**
* Default: DEFAULT_STORAGES_PER_DATANODE
*/
public Builder storagesPerDatanode(int numStorages) {
this.storagesPerDatanode = numStorages;
return this;
}
/**
* Set the same storage type configuration for each datanode.
* If storageTypes is uninitialized or passed null then
* StorageType.DEFAULT is used.
*/
public Builder storageTypes(StorageType[] types) {
this.storageTypes1D = types;
return this;
}
/**
* Set custom storage type configuration for each datanode.
* If storageTypes is uninitialized or passed null then
* StorageType.DEFAULT is used.
*/
public Builder storageTypes(StorageType[][] types) {
this.storageTypes = types;
return this;
}
/**
* Set the same storage capacity configuration for each datanode.
     * If storageCapacities is uninitialized or passed null then
     * capacity is limited by available disk space.
*/
public Builder storageCapacities(long[] capacities) {
this.storageCapacities1D = capacities;
return this;
}
/**
* Set custom storage capacity configuration for each datanode.
* If storageCapacities is uninitialized or passed null then
* capacity is limited by available disk space.
*/
public Builder storageCapacities(long[][] capacities) {
this.storageCapacities = capacities;
return this;
}
/**
* Default: true
*/
public Builder format(boolean val) {
this.format = val;
return this;
}
/**
* Default: true
*/
public Builder manageNameDfsDirs(boolean val) {
this.manageNameDfsDirs = val;
return this;
}
/**
* Default: true
*/
public Builder manageNameDfsSharedDirs(boolean val) {
this.manageNameDfsSharedDirs = val;
return this;
}
/**
* Default: true
*/
public Builder enableManagedDfsDirsRedundancy(boolean val) {
this.enableManagedDfsDirsRedundancy = val;
return this;
}
/**
* Default: true
*/
public Builder manageDataDfsDirs(boolean val) {
this.manageDataDfsDirs = val;
return this;
}
/**
* Default: null
*/
public Builder startupOption(StartupOption val) {
this.option = val;
return this;
}
/**
* Default: null
*/
public Builder dnStartupOption(StartupOption val) {
this.dnOption = val;
return this;
}
/**
* Default: null
*/
public Builder racks(String[] val) {
this.racks = val;
return this;
}
/**
* Default: null
*/
public Builder hosts(String[] val) {
this.hosts = val;
return this;
}
/**
* Use SimulatedFSDataset and limit the capacity of each DN per
* the values passed in val.
*
* For limiting the capacity of volumes with real storage, see
* {@link FsVolumeImpl#setCapacityForTesting}
* Default: null
*/
public Builder simulatedCapacities(long[] val) {
this.simulatedCapacities = val;
return this;
}
/**
* Default: true
*/
public Builder waitSafeMode(boolean val) {
this.waitSafeMode = val;
return this;
}
/**
* Default: true
*/
public Builder checkExitOnShutdown(boolean val) {
this.checkExitOnShutdown = val;
return this;
}
/**
* Default: false
*/
public Builder checkDataNodeAddrConfig(boolean val) {
this.checkDataNodeAddrConfig = val;
return this;
}
/**
* Default: false
*/
public Builder checkDataNodeHostConfig(boolean val) {
this.checkDataNodeHostConfig = val;
return this;
}
/**
* Default: null
*/
public Builder clusterId(String cid) {
this.clusterId = cid;
return this;
}
/**
* Default: false
     * When true, the hosts file/include file for the cluster is set up.
*/
public Builder setupHostsFile(boolean val) {
this.setupHostsFile = val;
return this;
}
/**
* Default: a single namenode.
* See {@link MiniDFSNNTopology#simpleFederatedTopology(int)} to set up
* federated nameservices
*/
public Builder nnTopology(MiniDFSNNTopology topology) {
this.nnTopology = topology;
return this;
}
/**
* Default: null
*
* An array of {@link Configuration} objects that will overlay the
* global MiniDFSCluster Configuration for the corresponding DataNode.
*
* Useful for setting specific per-DataNode configuration parameters.
*/
public Builder dataNodeConfOverlays(Configuration[] dnConfOverlays) {
this.dnConfOverlays = dnConfOverlays;
return this;
}
/**
* Default: true
* When true, we skip fsync() calls for speed improvements.
*/
public Builder skipFsyncForTesting(boolean val) {
this.skipFsyncForTesting = val;
return this;
}
/**
* Construct the actual MiniDFSCluster
*/
public MiniDFSCluster build() throws IOException {
return new MiniDFSCluster(this);
}
}
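  // --- Illustrative sketch (editor's addition, not part of the original source) ---
  // A minimal example of how the Builder above is typically driven from a test.
  // The node count and the try/finally shape are assumptions for illustration;
  // only builder and cluster methods that appear elsewhere in this codebase
  // (numDataNodes, format, build, getFileSystem, shutdown) are used.
  @SuppressWarnings("unused")
  private static void exampleBuilderUsage(Configuration conf) throws IOException {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)   // start two DataNodes
        .format(true)      // format name/data directories before starting
        .build();
    try {
      FileSystem fs = cluster.getFileSystem();
      // ... exercise the cluster through fs here ...
    } finally {
      cluster.shutdown();
    }
  }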
/**
* Used by builder to create and return an instance of MiniDFSCluster
*/
protected MiniDFSCluster(Builder builder) throws IOException {
if (builder.nnTopology == null) {
// If no topology is specified, build a single NN.
builder.nnTopology = MiniDFSNNTopology.simpleSingleNN(
builder.nameNodePort, builder.nameNodeHttpPort);
}
assert builder.storageTypes == null ||
builder.storageTypes.length == builder.numDataNodes;
final int numNameNodes = builder.nnTopology.countNameNodes();
LOG.info("starting cluster: numNameNodes=" + numNameNodes
+ ", numDataNodes=" + builder.numDataNodes);
nameNodes = new NameNodeInfo[numNameNodes];
this.storagesPerDatanode = builder.storagesPerDatanode;
// Duplicate the storageType setting for each DN.
if (builder.storageTypes == null && builder.storageTypes1D != null) {
assert builder.storageTypes1D.length == storagesPerDatanode;
builder.storageTypes = new StorageType[builder.numDataNodes][storagesPerDatanode];
for (int i = 0; i < builder.numDataNodes; ++i) {
builder.storageTypes[i] = builder.storageTypes1D;
}
}
// Duplicate the storageCapacity setting for each DN.
if (builder.storageCapacities == null && builder.storageCapacities1D != null) {
assert builder.storageCapacities1D.length == storagesPerDatanode;
builder.storageCapacities = new long[builder.numDataNodes][storagesPerDatanode];
for (int i = 0; i < builder.numDataNodes; ++i) {
builder.storageCapacities[i] = builder.storageCapacities1D;
}
}
initMiniDFSCluster(builder.conf,
builder.numDataNodes,
builder.storageTypes,
builder.format,
builder.manageNameDfsDirs,
builder.manageNameDfsSharedDirs,
builder.enableManagedDfsDirsRedundancy,
builder.manageDataDfsDirs,
builder.option,
builder.dnOption,
builder.racks,
builder.hosts,
builder.storageCapacities,
builder.simulatedCapacities,
builder.clusterId,
builder.waitSafeMode,
builder.setupHostsFile,
builder.nnTopology,
builder.checkExitOnShutdown,
builder.checkDataNodeAddrConfig,
builder.checkDataNodeHostConfig,
builder.dnConfOverlays,
builder.skipFsyncForTesting);
}
public class DataNodeProperties {
final DataNode datanode;
final Configuration conf;
String[] dnArgs;
final SecureResources secureResources;
final int ipcPort;
DataNodeProperties(DataNode node, Configuration conf, String[] args,
SecureResources secureResources, int ipcPort) {
this.datanode = node;
this.conf = conf;
this.dnArgs = args;
this.secureResources = secureResources;
this.ipcPort = ipcPort;
}
public void setDnArgs(String ... args) {
dnArgs = args;
}
}
private Configuration conf;
private NameNodeInfo[] nameNodes;
protected int numDataNodes;
protected final ArrayList<DataNodeProperties> dataNodes =
new ArrayList<DataNodeProperties>();
private File base_dir;
private File data_dir;
private boolean waitSafeMode = true;
private boolean federation;
private boolean checkExitOnShutdown = true;
protected final int storagesPerDatanode;
private Set<FileSystem> fileSystems = Sets.newHashSet();
/**
* A unique instance identifier for the cluster. This
* is used to disambiguate HA filesystems in the case where
* multiple MiniDFSClusters are used in the same test suite.
*/
private int instanceId;
private static int instanceCount = 0;
/**
* Stores the information related to a namenode in the cluster
*/
public static class NameNodeInfo {
final NameNode nameNode;
final Configuration conf;
final String nameserviceId;
final String nnId;
StartupOption startOpt;
NameNodeInfo(NameNode nn, String nameserviceId, String nnId,
StartupOption startOpt, Configuration conf) {
this.nameNode = nn;
this.nameserviceId = nameserviceId;
this.nnId = nnId;
this.startOpt = startOpt;
this.conf = conf;
}
public void setStartOpt(StartupOption startOpt) {
this.startOpt = startOpt;
}
}
/**
   * This no-argument constructor is used only when starting a data node cluster
   * without a name node (i.e., when the name node is started elsewhere).
*/
public MiniDFSCluster() {
nameNodes = new NameNodeInfo[0]; // No namenode in the cluster
storagesPerDatanode = DEFAULT_STORAGES_PER_DATANODE;
synchronized (MiniDFSCluster.class) {
instanceId = instanceCount++;
}
}
/**
* Modify the config and start up the servers with the given operation.
* Servers will be started on free ports.
* <p>
* The caller must manage the creation of NameNode and DataNode directories
* and have already set {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} in the given conf.
*
* @param conf the base configuration to use in starting the servers. This
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param nameNodeOperation the operation with which to start the servers. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
*/
@Deprecated // in 22 to be removed in 24. Use MiniDFSCluster.Builder instead
public MiniDFSCluster(Configuration conf,
int numDataNodes,
StartupOption nameNodeOperation) throws IOException {
this(0, conf, numDataNodes, false, false, false, nameNodeOperation,
null, null, null);
}
/**
* Modify the config and start up the servers. The rpc and info ports for
* servers are guaranteed to use free ports.
* <p>
* NameNode and DataNode directory creation and configuration will be
* managed by this class.
*
* @param conf the base configuration to use in starting the servers. This
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param format if true, format the NameNode and DataNodes before starting up
* @param racks array of strings indicating the rack that each DataNode is on
*/
@Deprecated // in 22 to be removed in 24. Use MiniDFSCluster.Builder instead
public MiniDFSCluster(Configuration conf,
int numDataNodes,
boolean format,
String[] racks) throws IOException {
this(0, conf, numDataNodes, format, true, true, null, racks, null, null);
}
/**
* Modify the config and start up the servers. The rpc and info ports for
* servers are guaranteed to use free ports.
* <p>
* NameNode and DataNode directory creation and configuration will be
* managed by this class.
*
* @param conf the base configuration to use in starting the servers. This
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param format if true, format the NameNode and DataNodes before starting up
* @param racks array of strings indicating the rack that each DataNode is on
* @param hosts array of strings indicating the hostname for each DataNode
*/
@Deprecated // in 22 to be removed in 24. Use MiniDFSCluster.Builder instead
public MiniDFSCluster(Configuration conf,
int numDataNodes,
boolean format,
String[] racks, String[] hosts) throws IOException {
this(0, conf, numDataNodes, format, true, true, null, racks, hosts, null);
}
/**
* NOTE: if possible, the other constructors that don't have nameNode port
* parameter should be used as they will ensure that the servers use free
* ports.
* <p>
* Modify the config and start up the servers.
*
* @param nameNodePort suggestion for which rpc port to use. caller should
* use getNameNodePort() to get the actual port used.
* @param conf the base configuration to use in starting the servers. This
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param format if true, format the NameNode and DataNodes before starting
* up
* @param manageDfsDirs if true, the data directories for servers will be
* created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
* the conf
* @param operation the operation with which to start the servers. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
* @param racks array of strings indicating the rack that each DataNode is on
*/
@Deprecated // in 22 to be removed in 24. Use MiniDFSCluster.Builder instead
public MiniDFSCluster(int nameNodePort,
Configuration conf,
int numDataNodes,
boolean format,
boolean manageDfsDirs,
StartupOption operation,
String[] racks) throws IOException {
this(nameNodePort, conf, numDataNodes, format, manageDfsDirs, manageDfsDirs,
operation, racks, null, null);
}
/**
* NOTE: if possible, the other constructors that don't have nameNode port
* parameter should be used as they will ensure that the servers use free ports.
* <p>
* Modify the config and start up the servers.
*
* @param nameNodePort suggestion for which rpc port to use. caller should
* use getNameNodePort() to get the actual port used.
* @param conf the base configuration to use in starting the servers. This
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param format if true, format the NameNode and DataNodes before starting up
* @param manageDfsDirs if true, the data directories for servers will be
* created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
* the conf
* @param operation the operation with which to start the servers. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
* @param racks array of strings indicating the rack that each DataNode is on
* @param simulatedCapacities array of capacities of the simulated data nodes
*/
@Deprecated // in 22 to be removed in 24. Use MiniDFSCluster.Builder instead
public MiniDFSCluster(int nameNodePort,
Configuration conf,
int numDataNodes,
boolean format,
boolean manageDfsDirs,
StartupOption operation,
String[] racks,
long[] simulatedCapacities) throws IOException {
this(nameNodePort, conf, numDataNodes, format, manageDfsDirs, manageDfsDirs,
operation, racks, null, simulatedCapacities);
}
/**
* NOTE: if possible, the other constructors that don't have nameNode port
* parameter should be used as they will ensure that the servers use free ports.
* <p>
* Modify the config and start up the servers.
*
* @param nameNodePort suggestion for which rpc port to use. caller should
* use getNameNodePort() to get the actual port used.
* @param conf the base configuration to use in starting the servers. This
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param format if true, format the NameNode and DataNodes before starting up
* @param manageNameDfsDirs if true, the data directories for servers will be
* created and {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set in
* the conf
* @param manageDataDfsDirs if true, the data directories for datanodes will
* be created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY}
* set to same in the conf
* @param operation the operation with which to start the servers. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
* @param racks array of strings indicating the rack that each DataNode is on
* @param hosts array of strings indicating the hostnames of each DataNode
* @param simulatedCapacities array of capacities of the simulated data nodes
*/
@Deprecated // in 22 to be removed in 24. Use MiniDFSCluster.Builder instead
public MiniDFSCluster(int nameNodePort,
Configuration conf,
int numDataNodes,
boolean format,
boolean manageNameDfsDirs,
boolean manageDataDfsDirs,
StartupOption operation,
String[] racks, String hosts[],
long[] simulatedCapacities) throws IOException {
this.nameNodes = new NameNodeInfo[1]; // Single namenode in the cluster
this.storagesPerDatanode = DEFAULT_STORAGES_PER_DATANODE;
initMiniDFSCluster(conf, numDataNodes, null, format,
manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs,
operation, null, racks, hosts,
null, simulatedCapacities, null, true, false,
MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0),
true, false, false, null, true);
}
private void initMiniDFSCluster(
Configuration conf,
int numDataNodes, StorageType[][] storageTypes, boolean format, boolean manageNameDfsDirs,
boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
boolean manageDataDfsDirs, StartupOption startOpt,
StartupOption dnStartOpt, String[] racks,
String[] hosts,
long[][] storageCapacities, long[] simulatedCapacities, String clusterId,
boolean waitSafeMode, boolean setupHostsFile,
MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
boolean checkDataNodeAddrConfig,
boolean checkDataNodeHostConfig,
Configuration[] dnConfOverlays,
boolean skipFsyncForTesting)
throws IOException {
boolean success = false;
try {
ExitUtil.disableSystemExit();
// Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
FileSystem.enableSymlinks();
synchronized (MiniDFSCluster.class) {
instanceId = instanceCount++;
}
this.conf = conf;
base_dir = new File(determineDfsBaseDir());
data_dir = new File(base_dir, "data");
this.waitSafeMode = waitSafeMode;
this.checkExitOnShutdown = checkExitOnShutdown;
int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
int safemodeExtension = conf.getInt(
DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
StaticMapping.class, DNSToSwitchMapping.class);
// In an HA cluster, in order for the StandbyNode to perform checkpoints,
// it needs to know the HTTP port of the Active. So, if ephemeral ports
// are chosen, disable checkpoints for the test.
if (!nnTopology.allHttpPortsSpecified() &&
nnTopology.isHA()) {
LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
"since no HTTP ports have been specified.");
conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
}
if (!nnTopology.allIpcPortsSpecified() &&
nnTopology.isHA()) {
LOG.info("MiniDFSCluster disabling log-roll triggering in the "
+ "Standby node since no IPC ports have been specified.");
conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
}
EditLogFileOutputStream.setShouldSkipFsyncForTesting(skipFsyncForTesting);
federation = nnTopology.isFederated();
try {
createNameNodesAndSetConf(
nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
enableManagedDfsDirsRedundancy,
format, startOpt, clusterId, conf);
} catch (IOException ioe) {
LOG.error("IOE creating namenodes. Permissions dump:\n" +
createPermissionsDiagnosisString(data_dir), ioe);
throw ioe;
}
if (format) {
if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
throw new IOException("Cannot remove data directory: " + data_dir +
createPermissionsDiagnosisString(data_dir));
}
}
if (startOpt == StartupOption.RECOVER) {
return;
}
// Start the DataNodes
startDataNodes(conf, numDataNodes, storageTypes, manageDataDfsDirs,
dnStartOpt != null ? dnStartOpt : startOpt,
racks, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
waitClusterUp();
//make sure ProxyUsers uses the latest conf
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
success = true;
} finally {
if (!success) {
shutdown();
}
}
}
/**
* @return a debug string which can help diagnose an error of why
* a given directory might have a permissions error in the context
* of a test case
*/
private String createPermissionsDiagnosisString(File path) {
StringBuilder sb = new StringBuilder();
while (path != null) {
sb.append("path '" + path + "': ").append("\n");
sb.append("\tabsolute:").append(path.getAbsolutePath()).append("\n");
sb.append("\tpermissions: ");
sb.append(path.isDirectory() ? "d": "-");
sb.append(FileUtil.canRead(path) ? "r" : "-");
sb.append(FileUtil.canWrite(path) ? "w" : "-");
sb.append(FileUtil.canExecute(path) ? "x" : "-");
sb.append("\n");
path = path.getParentFile();
}
return sb.toString();
}
private void createNameNodesAndSetConf(MiniDFSNNTopology nnTopology,
boolean manageNameDfsDirs, boolean manageNameDfsSharedDirs,
boolean enableManagedDfsDirsRedundancy, boolean format,
StartupOption operation, String clusterId,
Configuration conf) throws IOException {
Preconditions.checkArgument(nnTopology.countNameNodes() > 0,
"empty NN topology: no namenodes specified!");
if (!federation && nnTopology.countNameNodes() == 1) {
NNConf onlyNN = nnTopology.getOnlyNameNode();
// we only had one NN, set DEFAULT_NAME for it. If not explicitly
// specified initially, the port will be 0 to make NN bind to any
// available port. It will be set to the right address after
// NN is started.
conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:" + onlyNN.getIpcPort());
}
List<String> allNsIds = Lists.newArrayList();
for (MiniDFSNNTopology.NSConf nameservice : nnTopology.getNameservices()) {
if (nameservice.getId() != null) {
allNsIds.add(nameservice.getId());
}
}
if (!allNsIds.isEmpty()) {
conf.set(DFS_NAMESERVICES, Joiner.on(",").join(allNsIds));
}
int nnCounter = 0;
for (MiniDFSNNTopology.NSConf nameservice : nnTopology.getNameservices()) {
String nsId = nameservice.getId();
String lastDefaultFileSystem = null;
Preconditions.checkArgument(
!federation || nsId != null,
"if there is more than one NS, they must have names");
// First set up the configuration which all of the NNs
// need to have - have to do this a priori before starting
// *any* of the NNs, so they know to come up in standby.
List<String> nnIds = Lists.newArrayList();
// Iterate over the NNs in this nameservice
for (NNConf nn : nameservice.getNNs()) {
nnIds.add(nn.getNnId());
initNameNodeAddress(conf, nameservice.getId(), nn);
}
// If HA is enabled on this nameservice, enumerate all the namenodes
// in the configuration. Also need to set a shared edits dir
if (nnIds.size() > 1) {
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, nameservice.getId()),
Joiner.on(",").join(nnIds));
if (manageNameDfsSharedDirs) {
URI sharedEditsUri = getSharedEditsDir(nnCounter, nnCounter+nnIds.size()-1);
conf.set(DFS_NAMENODE_SHARED_EDITS_DIR_KEY, sharedEditsUri.toString());
// Clean out the shared edits dir completely, including all subdirectories.
FileUtil.fullyDelete(new File(sharedEditsUri));
}
}
// Now format first NN and copy the storage directory from that node to the others.
int i = 0;
Collection<URI> prevNNDirs = null;
int nnCounterForFormat = nnCounter;
for (NNConf nn : nameservice.getNNs()) {
initNameNodeConf(conf, nsId, nn.getNnId(), manageNameDfsDirs,
enableManagedDfsDirsRedundancy, nnCounterForFormat);
Collection<URI> namespaceDirs = FSNamesystem.getNamespaceDirs(conf);
if (format) {
for (URI nameDirUri : namespaceDirs) {
File nameDir = new File(nameDirUri);
if (nameDir.exists() && !FileUtil.fullyDelete(nameDir)) {
throw new IOException("Could not fully delete " + nameDir);
}
}
Collection<URI> checkpointDirs = Util.stringCollectionAsURIs(conf
.getTrimmedStringCollection(DFS_NAMENODE_CHECKPOINT_DIR_KEY));
for (URI checkpointDirUri : checkpointDirs) {
File checkpointDir = new File(checkpointDirUri);
if (checkpointDir.exists() && !FileUtil.fullyDelete(checkpointDir)) {
throw new IOException("Could not fully delete " + checkpointDir);
}
}
}
boolean formatThisOne = format;
if (format && i++ > 0) {
// Don't format the second NN in an HA setup - that
// would result in it having a different clusterID,
// block pool ID, etc. Instead, copy the name dirs
// from the first one.
formatThisOne = false;
assert (null != prevNNDirs);
copyNameDirs(prevNNDirs, namespaceDirs, conf);
}
nnCounterForFormat++;
if (formatThisOne) {
// Allow overriding clusterID for specific NNs to test
// misconfiguration.
if (nn.getClusterId() == null) {
StartupOption.FORMAT.setClusterId(clusterId);
} else {
StartupOption.FORMAT.setClusterId(nn.getClusterId());
}
DFSTestUtil.formatNameNode(conf);
}
prevNNDirs = namespaceDirs;
}
// Start all Namenodes
for (NNConf nn : nameservice.getNNs()) {
initNameNodeConf(conf, nsId, nn.getNnId(), manageNameDfsDirs,
enableManagedDfsDirsRedundancy, nnCounter);
createNameNode(nnCounter, conf, numDataNodes, false, operation,
clusterId, nsId, nn.getNnId());
// Record the last namenode uri
if (nameNodes[nnCounter] != null && nameNodes[nnCounter].conf != null) {
lastDefaultFileSystem =
nameNodes[nnCounter].conf.get(FS_DEFAULT_NAME_KEY);
}
nnCounter++;
}
if (!federation && lastDefaultFileSystem != null) {
// Set the default file system to the actual bind address of NN.
conf.set(FS_DEFAULT_NAME_KEY, lastDefaultFileSystem);
}
}
}
public URI getSharedEditsDir(int minNN, int maxNN) throws IOException {
return formatSharedEditsDir(base_dir, minNN, maxNN);
}
public static URI formatSharedEditsDir(File baseDir, int minNN, int maxNN)
throws IOException {
return fileAsURI(new File(baseDir, "shared-edits-" +
minNN + "-through-" + maxNN));
}
public NameNodeInfo[] getNameNodeInfos() {
return this.nameNodes;
}
private void initNameNodeConf(Configuration conf,
String nameserviceId, String nnId,
boolean manageNameDfsDirs, boolean enableManagedDfsDirsRedundancy,
int nnIndex) throws IOException {
if (nameserviceId != null) {
conf.set(DFS_NAMESERVICE_ID, nameserviceId);
}
if (nnId != null) {
conf.set(DFS_HA_NAMENODE_ID_KEY, nnId);
}
if (manageNameDfsDirs) {
if (enableManagedDfsDirsRedundancy) {
conf.set(DFS_NAMENODE_NAME_DIR_KEY,
fileAsURI(new File(base_dir, "name" + (2*nnIndex + 1)))+","+
fileAsURI(new File(base_dir, "name" + (2*nnIndex + 2))));
conf.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,
fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 1)))+","+
fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 2))));
} else {
conf.set(DFS_NAMENODE_NAME_DIR_KEY,
fileAsURI(new File(base_dir, "name" + (2*nnIndex + 1))).
toString());
conf.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,
fileAsURI(new File(base_dir, "namesecondary" + (2*nnIndex + 1))).
toString());
}
}
}
public static void copyNameDirs(Collection<URI> srcDirs, Collection<URI> dstDirs,
Configuration dstConf) throws IOException {
URI srcDir = Lists.newArrayList(srcDirs).get(0);
FileSystem dstFS = FileSystem.getLocal(dstConf).getRaw();
for (URI dstDir : dstDirs) {
Preconditions.checkArgument(!dstDir.equals(srcDir),
"src and dst are the same: " + dstDir);
File dstDirF = new File(dstDir);
if (dstDirF.exists()) {
if (!FileUtil.fullyDelete(dstDirF)) {
throw new IOException("Unable to delete: " + dstDirF);
}
}
LOG.info("Copying namedir from primary node dir "
+ srcDir + " to " + dstDir);
FileUtil.copy(
new File(srcDir),
dstFS, new Path(dstDir), false, dstConf);
}
}
/**
* Initialize the address and port for this NameNode. In the
* non-federated case, the nameservice and namenode ID may be
* null.
*/
private static void initNameNodeAddress(Configuration conf,
String nameserviceId, NNConf nnConf) {
    // Set the NN-specific HTTP and RPC address keys
String key = DFSUtil.addKeySuffixes(
DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId,
nnConf.getNnId());
conf.set(key, "127.0.0.1:" + nnConf.getHttpPort());
key = DFSUtil.addKeySuffixes(
DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId,
nnConf.getNnId());
conf.set(key, "127.0.0.1:" + nnConf.getIpcPort());
}
private static String[] createArgs(StartupOption operation) {
if (operation == StartupOption.ROLLINGUPGRADE) {
return new String[]{operation.getName(),
operation.getRollingUpgradeStartupOption().name()};
}
String[] args = (operation == null ||
operation == StartupOption.FORMAT ||
operation == StartupOption.REGULAR) ?
new String[] {} : new String[] {operation.getName()};
return args;
}
private void createNameNode(int nnIndex, Configuration conf,
int numDataNodes, boolean format, StartupOption operation,
String clusterId, String nameserviceId,
String nnId)
throws IOException {
// Format and clean out DataNode directories
if (format) {
DFSTestUtil.formatNameNode(conf);
}
if (operation == StartupOption.UPGRADE){
operation.setClusterId(clusterId);
}
// Start the NameNode after saving the default file system.
String originalDefaultFs = conf.get(FS_DEFAULT_NAME_KEY);
String[] args = createArgs(operation);
NameNode nn = NameNode.createNameNode(args, conf);
if (operation == StartupOption.RECOVER) {
return;
}
// After the NN has started, set back the bound ports into
// the conf
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
nameserviceId, nnId), nn.getNameNodeAddressHostPortString());
if (nn.getHttpAddress() != null) {
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY,
nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpAddress()));
}
if (nn.getHttpsAddress() != null) {
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTPS_ADDRESS_KEY,
nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpsAddress()));
}
DFSUtil.setGenericConf(conf, nameserviceId, nnId,
DFS_NAMENODE_HTTP_ADDRESS_KEY);
nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId,
operation, new Configuration(conf));
// Restore the default fs name
if (originalDefaultFs == null) {
conf.set(FS_DEFAULT_NAME_KEY, "");
} else {
conf.set(FS_DEFAULT_NAME_KEY, originalDefaultFs);
}
}
/**
* @return URI of the namenode from a single namenode MiniDFSCluster
*/
public URI getURI() {
checkSingleNameNode();
return getURI(0);
}
/**
* @return URI of the given namenode in MiniDFSCluster
*/
public URI getURI(int nnIndex) {
String hostPort =
nameNodes[nnIndex].nameNode.getNameNodeAddressHostPortString();
URI uri = null;
try {
uri = new URI("hdfs://" + hostPort);
} catch (URISyntaxException e) {
NameNode.LOG.warn("unexpected URISyntaxException: " + e );
}
return uri;
}
public int getInstanceId() {
return instanceId;
}
/**
   * @return Configuration for the given namenode
*/
public Configuration getConfiguration(int nnIndex) {
return nameNodes[nnIndex].conf;
}
/**
* wait for the given namenode to get out of safemode.
*/
public void waitNameNodeUp(int nnIndex) {
while (!isNameNodeUp(nnIndex)) {
try {
LOG.warn("Waiting for namenode at " + nnIndex + " to start...");
Thread.sleep(1000);
} catch (InterruptedException e) {
}
}
}
/**
* wait for the cluster to get out of safemode.
*/
public void waitClusterUp() throws IOException {
int i = 0;
if (numDataNodes > 0) {
while (!isClusterUp()) {
try {
LOG.warn("Waiting for the Mini HDFS Cluster to start...");
Thread.sleep(1000);
} catch (InterruptedException e) {
}
if (++i > 10) {
final String msg = "Timed out waiting for Mini HDFS Cluster to start";
LOG.error(msg);
throw new IOException(msg);
}
}
}
}
String makeDataNodeDirs(int dnIndex, StorageType[] storageTypes) throws IOException {
StringBuilder sb = new StringBuilder();
for (int j = 0; j < storagesPerDatanode; ++j) {
if ((storageTypes != null) && (j >= storageTypes.length)) {
break;
}
File dir = getInstanceStorageDir(dnIndex, j);
dir.mkdirs();
if (!dir.isDirectory()) {
throw new IOException("Mkdirs failed to create directory for DataNode " + dir);
}
sb.append((j > 0 ? "," : "") + "[" +
(storageTypes == null ? StorageType.DEFAULT : storageTypes[j]) +
"]" + fileAsURI(dir));
}
return sb.toString();
}
/**
* Modify the config and start up additional DataNodes. The info port for
* DataNodes is guaranteed to use a free port.
*
   * Data nodes can run with the name node in the mini cluster or with a
   * real name node; for example, running simulated data nodes against a
   * real name node. If the minicluster's name node is null, it is assumed
   * that the conf has already been set with the correct address:port of
   * the name node.
*
* @param conf the base configuration to use in starting the DataNodes. This
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param manageDfsDirs if true, the data directories for DataNodes will be
* created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be set
* in the conf
* @param operation the operation with which to start the DataNodes. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
* @param racks array of strings indicating the rack that each DataNode is on
* @param hosts array of strings indicating the hostnames for each DataNode
* @param simulatedCapacities array of capacities of the simulated data nodes
*
* @throws IllegalStateException if NameNode has been shutdown
*/
public synchronized void startDataNodes(Configuration conf, int numDataNodes,
boolean manageDfsDirs, StartupOption operation,
String[] racks, String[] hosts,
long[] simulatedCapacities) throws IOException {
startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks,
hosts, simulatedCapacities, false);
}
/**
* Modify the config and start up additional DataNodes. The info port for
* DataNodes is guaranteed to use a free port.
*
   * Data nodes can run with the name node in the mini cluster or with a
   * real name node; for example, running simulated data nodes against a
   * real name node. If the minicluster's name node is null, it is assumed
   * that the conf has already been set with the correct address:port of
   * the name node.
*
* @param conf the base configuration to use in starting the DataNodes. This
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param manageDfsDirs if true, the data directories for DataNodes will be
* created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
* set in the conf
* @param operation the operation with which to start the DataNodes. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
* @param racks array of strings indicating the rack that each DataNode is on
* @param hosts array of strings indicating the hostnames for each DataNode
* @param simulatedCapacities array of capacities of the simulated data nodes
* @param setupHostsFile add new nodes to dfs hosts files
*
* @throws IllegalStateException if NameNode has been shutdown
*/
public synchronized void startDataNodes(Configuration conf, int numDataNodes,
boolean manageDfsDirs, StartupOption operation,
String[] racks, String[] hosts,
long[] simulatedCapacities,
boolean setupHostsFile) throws IOException {
startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts,
null, simulatedCapacities, setupHostsFile, false, false, null);
}
public synchronized void startDataNodes(Configuration conf, int numDataNodes,
boolean manageDfsDirs, StartupOption operation,
String[] racks, String[] hosts,
long[] simulatedCapacities,
boolean setupHostsFile,
boolean checkDataNodeAddrConfig) throws IOException {
startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts,
null, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false, null);
}
/**
* Modify the config and start up additional DataNodes. The info port for
* DataNodes is guaranteed to use a free port.
*
   * Data nodes can run with the name node in the mini cluster or with a
   * real name node; for example, running simulated data nodes against a
   * real name node. If the minicluster's name node is null, it is assumed
   * that the conf has already been set with the correct address:port of
   * the name node.
*
* @param conf the base configuration to use in starting the DataNodes. This
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param manageDfsDirs if true, the data directories for DataNodes will be
* created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
* set in the conf
* @param operation the operation with which to start the DataNodes. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
* @param racks array of strings indicating the rack that each DataNode is on
* @param hosts array of strings indicating the hostnames for each DataNode
* @param simulatedCapacities array of capacities of the simulated data nodes
* @param setupHostsFile add new nodes to dfs hosts files
* @param checkDataNodeAddrConfig if true, only set DataNode port addresses if not already set in config
* @param checkDataNodeHostConfig if true, only set DataNode hostname key if not already set in config
* @param dnConfOverlays An array of {@link Configuration} objects that will overlay the
* global MiniDFSCluster Configuration for the corresponding DataNode.
* @throws IllegalStateException if NameNode has been shutdown
*/
public synchronized void startDataNodes(Configuration conf, int numDataNodes,
StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
String[] racks, String[] hosts,
long[][] storageCapacities,
long[] simulatedCapacities,
boolean setupHostsFile,
boolean checkDataNodeAddrConfig,
boolean checkDataNodeHostConfig,
Configuration[] dnConfOverlays) throws IOException {
assert storageCapacities == null || simulatedCapacities == null;
assert storageTypes == null || storageTypes.length == numDataNodes;
assert storageCapacities == null || storageCapacities.length == numDataNodes;
if (operation == StartupOption.RECOVER) {
return;
}
if (checkDataNodeHostConfig) {
conf.setIfUnset(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
} else {
conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
}
int curDatanodesNum = dataNodes.size();
final int curDatanodesNumSaved = curDatanodesNum;
    // for the minicluster, the default initial delay for block reports is 0
if (conf.get(DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
conf.setLong(DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
}
// If minicluster's name node is null assume that the conf has been
// set with the right address:port of the name node.
//
if (racks != null && numDataNodes > racks.length ) {
throw new IllegalArgumentException( "The length of racks [" + racks.length
+ "] is less than the number of datanodes [" + numDataNodes + "].");
}
if (hosts != null && numDataNodes > hosts.length ) {
throw new IllegalArgumentException( "The length of hosts [" + hosts.length
+ "] is less than the number of datanodes [" + numDataNodes + "].");
}
//Generate some hostnames if required
if (racks != null && hosts == null) {
hosts = new String[numDataNodes];
for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
}
}
if (simulatedCapacities != null
&& numDataNodes > simulatedCapacities.length) {
throw new IllegalArgumentException( "The length of simulatedCapacities ["
+ simulatedCapacities.length
+ "] is less than the number of datanodes [" + numDataNodes + "].");
}
if (dnConfOverlays != null
&& numDataNodes > dnConfOverlays.length) {
throw new IllegalArgumentException( "The length of dnConfOverlays ["
+ dnConfOverlays.length
+ "] is less than the number of datanodes [" + numDataNodes + "].");
}
String [] dnArgs = (operation == null ||
operation != StartupOption.ROLLBACK) ?
null : new String[] {operation.getName()};
DataNode[] dns = new DataNode[numDataNodes];
for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) {
Configuration dnConf = new HdfsConfiguration(conf);
if (dnConfOverlays != null) {
dnConf.addResource(dnConfOverlays[i]);
}
// Set up datanode address
setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
if (manageDfsDirs) {
String dirs = makeDataNodeDirs(i, storageTypes == null ?
null : storageTypes[i - curDatanodesNum]);
dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
}
if (simulatedCapacities != null) {
SimulatedFSDataset.setFactory(dnConf);
dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
simulatedCapacities[i-curDatanodesNum]);
}
LOG.info("Starting DataNode " + i + " with "
+ DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": "
+ dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
if (hosts != null) {
dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
LOG.info("Starting DataNode " + i + " with hostname set to: "
+ dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
}
if (racks != null) {
String name = hosts[i - curDatanodesNum];
LOG.info("Adding node with hostname : " + name + " to rack " +
racks[i-curDatanodesNum]);
StaticMapping.addNodeToRack(name,
racks[i-curDatanodesNum]);
}
Configuration newconf = new HdfsConfiguration(dnConf); // save config
if (hosts != null) {
NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
}
SecureResources secureResources = null;
if (UserGroupInformation.isSecurityEnabled() &&
conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY) == null) {
try {
secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
} catch (Exception ex) {
ex.printStackTrace();
}
}
final int maxRetriesOnSasl = conf.getInt(
IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY,
IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT);
int numRetries = 0;
DataNode dn = null;
while (true) {
try {
dn = DataNode.instantiateDataNode(dnArgs, dnConf,
secureResources);
break;
} catch (IOException e) {
// Work around issue testing security where rapidly starting multiple
// DataNodes using the same principal gets rejected by the KDC as a
// replay attack.
if (UserGroupInformation.isSecurityEnabled() &&
numRetries < maxRetriesOnSasl) {
try {
Thread.sleep(1000);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
break;
}
++numRetries;
continue;
}
throw e;
}
}
if(dn == null)
throw new IOException("Cannot start DataNode in "
+ dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
//since the HDFS does things based on host|ip:port, we need to add the
//mapping for the service to rackId
String service =
SecurityUtil.buildTokenService(dn.getXferAddress()).toString();
if (racks != null) {
LOG.info("Adding node with service : " + service +
" to rack " + racks[i-curDatanodesNum]);
StaticMapping.addNodeToRack(service,
racks[i-curDatanodesNum]);
}
dn.runDatanodeDaemon();
dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs,
secureResources, dn.getIpcPort()));
dns[i - curDatanodesNum] = dn;
}
this.numDataNodes += numDataNodes;
waitActive();
if (storageCapacities != null) {
for (int i = curDatanodesNumSaved; i < curDatanodesNumSaved+numDataNodes; ++i) {
final int index = i - curDatanodesNum;
try (FsDatasetSpi.FsVolumeReferences volumes =
dns[index].getFSDataset().getFsVolumeReferences()) {
assert storageCapacities[index].length == storagesPerDatanode;
assert volumes.size() == storagesPerDatanode;
int j = 0;
for (FsVolumeSpi fvs : volumes) {
FsVolumeImpl volume = (FsVolumeImpl) fvs;
LOG.info("setCapacityForTesting " + storageCapacities[index][j]
+ " for [" + volume.getStorageType() + "]" + volume
.getStorageID());
volume.setCapacityForTesting(storageCapacities[index][j]);
j++;
}
}
}
}
}
/**
* Modify the config and start up the DataNodes. The info port for
* DataNodes is guaranteed to use a free port.
*
* @param conf the base configuration to use in starting the DataNodes. This
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param manageDfsDirs if true, the data directories for DataNodes will be
* created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
* set in the conf
* @param operation the operation with which to start the DataNodes. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
* @param racks array of strings indicating the rack that each DataNode is on
*
* @throws IllegalStateException if NameNode has been shutdown
*/
public void startDataNodes(Configuration conf, int numDataNodes,
boolean manageDfsDirs, StartupOption operation,
String[] racks
) throws IOException {
startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, null,
null, false);
}
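  /*
   * A short sketch of growing a running cluster with the overload above; the
   * rack names are illustrative and only meaningful to the test topology:
   *
   *   // Add two more DataNodes on distinct racks, then wait for registration.
   *   cluster.startDataNodes(cluster.getConfiguration(0), 2, true, null,
   *       new String[] {"/rack1", "/rack2"});
   *   cluster.waitActive();
   */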
/**
* Modify the config and start up additional DataNodes. The info port for
* DataNodes is guaranteed to use a free port.
*
   * Data nodes can run with the name node in the mini cluster or with a
   * real name node; for example, running simulated data nodes against a
   * real name node. If the minicluster's name node is null, it is assumed
   * that the conf has already been set with the correct address:port of
   * the name node.
*
* @param conf the base configuration to use in starting the DataNodes. This
* will be modified as necessary.
* @param numDataNodes Number of DataNodes to start; may be zero
* @param manageDfsDirs if true, the data directories for DataNodes will be
* created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will
* be set in the conf
* @param operation the operation with which to start the DataNodes. If null
* or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
* @param racks array of strings indicating the rack that each DataNode is on
* @param simulatedCapacities array of capacities of the simulated data nodes
*
* @throws IllegalStateException if NameNode has been shutdown
*/
public void startDataNodes(Configuration conf, int numDataNodes,
boolean manageDfsDirs, StartupOption operation,
String[] racks,
long[] simulatedCapacities) throws IOException {
startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, null,
simulatedCapacities, false);
}
/**
* Finalize the namenode. Block pools corresponding to the namenode are
* finalized on the datanode.
*/
private void finalizeNamenode(NameNode nn, Configuration conf) throws Exception {
if (nn == null) {
throw new IllegalStateException("Attempting to finalize "
+ "Namenode but it is not running");
}
ToolRunner.run(new DFSAdmin(conf), new String[]{"-finalizeUpgrade"});
}
/**
* Finalize cluster for the namenode at the given index
* @see MiniDFSCluster#finalizeCluster(Configuration)
* @param nnIndex index of the namenode
* @param conf configuration
* @throws Exception
*/
public void finalizeCluster(int nnIndex, Configuration conf) throws Exception {
finalizeNamenode(nameNodes[nnIndex].nameNode, nameNodes[nnIndex].conf);
}
/**
* If the NameNode is running, attempt to finalize a previous upgrade.
* When this method return, the NameNode should be finalized, but
* DataNodes may not be since that occurs asynchronously.
*
* @throws IllegalStateException if the Namenode is not running.
*/
public void finalizeCluster(Configuration conf) throws Exception {
for (NameNodeInfo nnInfo : nameNodes) {
if (nnInfo == null) {
throw new IllegalStateException("Attempting to finalize "
+ "Namenode but it is not running");
}
finalizeNamenode(nnInfo.nameNode, nnInfo.conf);
}
}
public int getNumNameNodes() {
return nameNodes.length;
}
/**
* Gets the started NameNode. May be null.
*/
public NameNode getNameNode() {
checkSingleNameNode();
return getNameNode(0);
}
/**
* Get an instance of the NameNode's RPC handler.
*/
public NamenodeProtocols getNameNodeRpc() {
checkSingleNameNode();
return getNameNodeRpc(0);
}
/**
   * Get an instance of the RPC handler of the NameNode at the given index.
*/
public NamenodeProtocols getNameNodeRpc(int nnIndex) {
return getNameNode(nnIndex).getRpcServer();
}
/**
* Gets the NameNode for the index. May be null.
*/
public NameNode getNameNode(int nnIndex) {
return nameNodes[nnIndex].nameNode;
}
/**
* Return the {@link FSNamesystem} object.
* @return {@link FSNamesystem} object.
*/
public FSNamesystem getNamesystem() {
checkSingleNameNode();
return NameNodeAdapter.getNamesystem(nameNodes[0].nameNode);
}
public FSNamesystem getNamesystem(int nnIndex) {
return NameNodeAdapter.getNamesystem(nameNodes[nnIndex].nameNode);
}
/**
* Gets a list of the started DataNodes. May be empty.
*/
public ArrayList<DataNode> getDataNodes() {
ArrayList<DataNode> list = new ArrayList<DataNode>();
for (int i = 0; i < dataNodes.size(); i++) {
DataNode node = dataNodes.get(i).datanode;
list.add(node);
}
return list;
}
/** @return the datanode having the ipc server listen port */
public DataNode getDataNode(int ipcPort) {
for(DataNode dn : getDataNodes()) {
if (dn.ipcServer.getListenerAddress().getPort() == ipcPort) {
return dn;
}
}
return null;
}
/**
* Gets the rpc port used by the NameNode, because the caller
* supplied port is not necessarily the actual port used.
* Assumption: cluster has a single namenode
*/
public int getNameNodePort() {
checkSingleNameNode();
return getNameNodePort(0);
}
/**
* Gets the rpc port used by the NameNode at the given index, because the
* caller supplied port is not necessarily the actual port used.
*/
public int getNameNodePort(int nnIndex) {
return nameNodes[nnIndex].nameNode.getNameNodeAddress().getPort();
}
/**
* @return the service rpc port used by the NameNode at the given index.
*/
public int getNameNodeServicePort(int nnIndex) {
return nameNodes[nnIndex].nameNode.getServiceRpcAddress().getPort();
}
/**
* Shutdown all the nodes in the cluster.
*/
public void shutdown() {
shutdown(false);
}
/**
* Shutdown all the nodes in the cluster.
*/
public void shutdown(boolean deleteDfsDir) {
shutdown(deleteDfsDir, true);
}
/**
* Shutdown all the nodes in the cluster.
*/
public void shutdown(boolean deleteDfsDir, boolean closeFileSystem) {
LOG.info("Shutting down the Mini HDFS Cluster");
if (checkExitOnShutdown) {
if (ExitUtil.terminateCalled()) {
LOG.fatal("Test resulted in an unexpected exit",
ExitUtil.getFirstExitException());
ExitUtil.resetFirstExitException();
throw new AssertionError("Test resulted in an unexpected exit");
}
}
if (closeFileSystem) {
for (FileSystem fs : fileSystems) {
try {
fs.close();
} catch (IOException ioe) {
LOG.warn("Exception while closing file system", ioe);
}
}
fileSystems.clear();
}
shutdownDataNodes();
for (NameNodeInfo nnInfo : nameNodes) {
if (nnInfo == null) continue;
NameNode nameNode = nnInfo.nameNode;
if (nameNode != null) {
nameNode.stop();
nameNode.join();
nameNode = null;
}
}
if (base_dir != null) {
if (deleteDfsDir) {
base_dir.delete();
} else {
base_dir.deleteOnExit();
}
}
}
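  /*
   * Typical lifecycle sketch: tests usually pair cluster creation with a
   * shutdown in a finally block so that a failed assertion still releases
   * ports and directories (Builder usage assumed, as above):
   *
   *   MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
   *   try {
   *     cluster.waitActive();
   *     // ... test body ...
   *   } finally {
   *     cluster.shutdown();
   *   }
   */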
/**
* Shutdown all DataNodes started by this class. The NameNode
* is left running so that new DataNodes may be started.
*/
public void shutdownDataNodes() {
for (int i = dataNodes.size()-1; i >= 0; i--) {
LOG.info("Shutting down DataNode " + i);
DataNode dn = dataNodes.remove(i).datanode;
dn.shutdown();
numDataNodes--;
}
}
/**
* Shutdown all the namenodes.
*/
public synchronized void shutdownNameNodes() {
for (int i = 0; i < nameNodes.length; i++) {
shutdownNameNode(i);
}
}
/**
* Shutdown the namenode at a given index.
*/
public synchronized void shutdownNameNode(int nnIndex) {
NameNode nn = nameNodes[nnIndex].nameNode;
if (nn != null) {
LOG.info("Shutting down the namenode");
nn.stop();
nn.join();
Configuration conf = nameNodes[nnIndex].conf;
nameNodes[nnIndex] = new NameNodeInfo(null, null, null, null, conf);
}
}
/**
* Restart all namenodes.
*/
public synchronized void restartNameNodes() throws IOException {
for (int i = 0; i < nameNodes.length; i++) {
restartNameNode(i, false);
}
waitActive();
}
/**
* Restart the namenode.
*/
public synchronized void restartNameNode(String... args) throws IOException {
checkSingleNameNode();
restartNameNode(0, true, args);
}
/**
* Restart the namenode. Optionally wait for the cluster to become active.
*/
public synchronized void restartNameNode(boolean waitActive)
throws IOException {
checkSingleNameNode();
restartNameNode(0, waitActive);
}
/**
* Restart the namenode at a given index.
*/
public synchronized void restartNameNode(int nnIndex) throws IOException {
restartNameNode(nnIndex, true);
}
/**
* Restart the namenode at a given index. Optionally wait for the cluster
* to become active.
*/
public synchronized void restartNameNode(int nnIndex, boolean waitActive,
String... args) throws IOException {
String nameserviceId = nameNodes[nnIndex].nameserviceId;
String nnId = nameNodes[nnIndex].nnId;
StartupOption startOpt = nameNodes[nnIndex].startOpt;
Configuration conf = nameNodes[nnIndex].conf;
shutdownNameNode(nnIndex);
if (args.length != 0) {
startOpt = null;
} else {
args = createArgs(startOpt);
}
NameNode nn = NameNode.createNameNode(args, conf);
nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId, startOpt,
conf);
if (waitActive) {
waitClusterUp();
LOG.info("Restarted the namenode");
waitActive();
}
}
private int corruptBlockOnDataNodesHelper(ExtendedBlock block,
boolean deleteBlockFile) throws IOException {
int blocksCorrupted = 0;
File[] blockFiles = getAllBlockFiles(block);
for (File f : blockFiles) {
if ((deleteBlockFile && corruptBlockByDeletingBlockFile(f)) ||
(!deleteBlockFile && corruptBlock(f))) {
blocksCorrupted++;
}
}
return blocksCorrupted;
}
/**
* Return the number of corrupted replicas of the given block.
*
* @param block block to be corrupted
* @throws IOException on error accessing the file for the given block
*/
public int corruptBlockOnDataNodes(ExtendedBlock block) throws IOException{
return corruptBlockOnDataNodesHelper(block, false);
}
/**
* Return the number of corrupted replicas of the given block.
*
* @param block block to be corrupted
* @throws IOException on error accessing the file for the given block
*/
public int corruptBlockOnDataNodesByDeletingBlockFile(ExtendedBlock block)
throws IOException{
return corruptBlockOnDataNodesHelper(block, true);
}
public String readBlockOnDataNode(int i, ExtendedBlock block)
throws IOException {
assert (i >= 0 && i < dataNodes.size()) : "Invalid datanode "+i;
File blockFile = getBlockFile(i, block);
if (blockFile != null && blockFile.exists()) {
return DFSTestUtil.readFile(blockFile);
}
return null;
}
/**
   * Corrupt a block on a particular datanode by writing bad data into the
   * replica file. (Other corruption types are deleting or truncating the
   * block file; see the related helpers.)
   *
   * @param i index of the datanode
   * @param blk the block to corrupt
   * @return true if a replica was corrupted, false otherwise
   * @throws IOException on error accessing the given block or if
   *           the contents of the block (on the same datanode) differ.
*/
public boolean corruptReplica(int i, ExtendedBlock blk)
throws IOException {
File blockFile = getBlockFile(i, blk);
return corruptBlock(blockFile);
}
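  /*
   * A hedged sketch of corrupting a single replica in a test; the file path is
   * arbitrary and DFSTestUtil.getFirstBlock is assumed to resolve the first
   * block of that file, as in other HDFS tests:
   *
   *   ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, new Path("/testFile"));
   *   boolean corrupted = cluster.corruptReplica(0, blk);  // datanode index 0
   */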
/*
* Corrupt a block on a particular datanode
*/
public static boolean corruptBlock(File blockFile) throws IOException {
if (blockFile == null || !blockFile.exists()) {
return false;
}
// Corrupt replica by writing random bytes into replica
Random random = new Random();
RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
FileChannel channel = raFile.getChannel();
String badString = "BADBAD";
int rand = random.nextInt((int)channel.size()/2);
raFile.seek(rand);
raFile.write(badString.getBytes());
raFile.close();
LOG.warn("Corrupting the block " + blockFile);
return true;
}
/*
* Corrupt a block on a particular datanode by deleting the block file
*/
public static boolean corruptBlockByDeletingBlockFile(File blockFile)
throws IOException {
if (blockFile == null || !blockFile.exists()) {
return false;
}
return blockFile.delete();
}
public boolean changeGenStampOfBlock(int dnIndex, ExtendedBlock blk,
long newGenStamp) throws IOException {
File blockFile = getBlockFile(dnIndex, blk);
File metaFile = FsDatasetUtil.findMetaFile(blockFile);
return metaFile.renameTo(new File(DatanodeUtil.getMetaName(
blockFile.getAbsolutePath(), newGenStamp)));
}
/*
* Shutdown a particular datanode
* @param i node index
* @return null if the node index is out of range, else the properties of the
* removed node
*/
public synchronized DataNodeProperties stopDataNode(int i) {
if (i < 0 || i >= dataNodes.size()) {
return null;
}
DataNodeProperties dnprop = dataNodes.remove(i);
DataNode dn = dnprop.datanode;
LOG.info("MiniDFSCluster Stopping DataNode " +
dn.getDisplayName() +
" from a total of " + (dataNodes.size() + 1) +
" datanodes.");
dn.shutdown();
numDataNodes--;
return dnprop;
}
/*
* Shutdown a datanode by name.
* @return the removed datanode or null if there was no match
*/
public synchronized DataNodeProperties stopDataNode(String dnName) {
int node = -1;
for (int i = 0; i < dataNodes.size(); i++) {
DataNode dn = dataNodes.get(i).datanode;
LOG.info("DN name=" + dnName + " found DN=" + dn +
" with name=" + dn.getDisplayName());
if (dnName.equals(dn.getDatanodeId().getXferAddr())) {
node = i;
break;
}
}
return stopDataNode(node);
}
/**
* Restart a datanode
* @param dnprop datanode's property
* @return true if restarting is successful
* @throws IOException
*/
public boolean restartDataNode(DataNodeProperties dnprop) throws IOException {
return restartDataNode(dnprop, false);
}
/**
* Restart a datanode, on the same port if requested
* @param dnprop the datanode to restart
* @param keepPort whether to use the same port
* @return true if restarting is successful
* @throws IOException
*/
public synchronized boolean restartDataNode(DataNodeProperties dnprop,
boolean keepPort) throws IOException {
Configuration conf = dnprop.conf;
String[] args = dnprop.dnArgs;
SecureResources secureResources = dnprop.secureResources;
Configuration newconf = new HdfsConfiguration(conf); // save cloned config
if (keepPort) {
InetSocketAddress addr = dnprop.datanode.getXferAddress();
conf.set(DFS_DATANODE_ADDRESS_KEY,
addr.getAddress().getHostAddress() + ":" + addr.getPort());
conf.set(DFS_DATANODE_IPC_ADDRESS_KEY,
addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort);
}
DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
dataNodes.add(new DataNodeProperties(
newDn, newconf, args, secureResources, newDn.getIpcPort()));
numDataNodes++;
return true;
}
/*
* Restart a particular datanode, use newly assigned port
*/
public boolean restartDataNode(int i) throws IOException {
return restartDataNode(i, false);
}
/*
* Restart a particular datanode, on the same port if keepPort is true
*/
public synchronized boolean restartDataNode(int i, boolean keepPort)
throws IOException {
return restartDataNode(i, keepPort, false);
}
/**
* Restart a particular DataNode.
* @param idn index of the DataNode
* @param keepPort true if should restart on the same port
* @param expireOnNN true if NameNode should expire the DataNode heartbeat
   * @return true if restarting is successful
* @throws IOException
*/
public synchronized boolean restartDataNode(
int idn, boolean keepPort, boolean expireOnNN) throws IOException {
    DataNodeProperties dnprop = stopDataNode(idn);
    if (dnprop == null) {
      // Nothing was stopped for this index; nothing to restart.
      return false;
    }
    if (expireOnNN) {
      // Expire the stopped datanode's heartbeat on the NameNode so it is
      // marked dead before the restart.
      setDataNodeDead(dnprop.datanode.getDatanodeId());
    }
    return restartDataNode(dnprop, keepPort);
  }
/**
* Expire a DataNode heartbeat on the NameNode
* @param dnId
* @throws IOException
*/
public void setDataNodeDead(DatanodeID dnId) throws IOException {
DatanodeDescriptor dnd =
NameNodeAdapter.getDatanode(getNamesystem(), dnId);
DFSTestUtil.setDatanodeDead(dnd);
BlockManagerTestUtil.checkHeartbeat(getNamesystem().getBlockManager());
}
public void setDataNodesDead() throws IOException {
for (DataNodeProperties dnp : dataNodes) {
setDataNodeDead(dnp.datanode.getDatanodeId());
}
}
/*
* Restart all datanodes, on the same ports if keepPort is true
*/
public synchronized boolean restartDataNodes(boolean keepPort)
throws IOException {
for (int i = dataNodes.size() - 1; i >= 0; i--) {
if (!restartDataNode(i, keepPort))
return false;
LOG.info("Restarted DataNode " + i);
}
return true;
}
/*
* Restart all datanodes, use newly assigned ports
*/
public boolean restartDataNodes() throws IOException {
return restartDataNodes(false);
}
/**
* Returns true if the NameNode is running and is out of Safe Mode
* or if waiting for safe mode is disabled.
*/
public boolean isNameNodeUp(int nnIndex) {
NameNode nameNode = nameNodes[nnIndex].nameNode;
if (nameNode == null) {
return false;
}
long[] sizes;
sizes = NameNodeAdapter.getStats(nameNode.getNamesystem());
boolean isUp = false;
synchronized (this) {
isUp = ((!nameNode.isInSafeMode() || !waitSafeMode) &&
sizes[ClientProtocol.GET_STATS_CAPACITY_IDX] != 0);
}
return isUp;
}
/**
   * Returns true if all the NameNodes are running and are out of Safe Mode.
*/
public boolean isClusterUp() {
for (int index = 0; index < nameNodes.length; index++) {
if (!isNameNodeUp(index)) {
return false;
}
}
return true;
}
/**
* Returns true if there is at least one DataNode running.
*/
public boolean isDataNodeUp() {
if (dataNodes == null || dataNodes.size() == 0) {
return false;
}
for (DataNodeProperties dn : dataNodes) {
if (dn.datanode.isDatanodeUp()) {
return true;
}
}
return false;
}
/**
* Get a client handle to the DFS cluster with a single namenode.
*/
public DistributedFileSystem getFileSystem() throws IOException {
checkSingleNameNode();
return getFileSystem(0);
}
/**
* Get a client handle to the DFS cluster for the namenode at given index.
*/
public DistributedFileSystem getFileSystem(int nnIndex) throws IOException {
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
getURI(nnIndex), nameNodes[nnIndex].conf);
fileSystems.add(dfs);
return dfs;
}
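  /*
   * A minimal sketch of using the returned handle; the path is arbitrary and
   * DFSTestUtil.createFile(fs, path, length, replication, seed) is assumed to
   * behave as in the HDFS test utilities:
   *
   *   DistributedFileSystem fs = cluster.getFileSystem();
   *   fs.mkdirs(new Path("/test"));
   *   DFSTestUtil.createFile(fs, new Path("/test/file1"), 1024, (short) 1, 0L);
   */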
/**
* Get another FileSystem instance that is different from FileSystem.get(conf).
   * This simulates different threads working on different FileSystem instances.
*/
public FileSystem getNewFileSystemInstance(int nnIndex) throws IOException {
FileSystem dfs = FileSystem.newInstance(getURI(nnIndex), nameNodes[nnIndex].conf);
fileSystems.add(dfs);
return dfs;
}
/**
   * @return an HTTP URL
*/
public String getHttpUri(int nnIndex) {
return "http://"
+ nameNodes[nnIndex].conf
.get(DFS_NAMENODE_HTTP_ADDRESS_KEY);
}
/**
* @return a {@link HftpFileSystem} object.
*/
public HftpFileSystem getHftpFileSystem(int nnIndex) throws IOException {
String uri = "hftp://"
+ nameNodes[nnIndex].conf
.get(DFS_NAMENODE_HTTP_ADDRESS_KEY);
try {
return (HftpFileSystem)FileSystem.get(new URI(uri), conf);
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
/**
   * @return a {@link HftpFileSystem} object as the specified user.
*/
public HftpFileSystem getHftpFileSystemAs(final String username,
final Configuration conf, final int nnIndex, final String... groups)
throws IOException, InterruptedException {
final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
username, groups);
return ugi.doAs(new PrivilegedExceptionAction<HftpFileSystem>() {
@Override
public HftpFileSystem run() throws Exception {
return getHftpFileSystem(nnIndex);
}
});
}
/**
* Get the directories where the namenode stores its image.
*/
public Collection<URI> getNameDirs(int nnIndex) {
return FSNamesystem.getNamespaceDirs(nameNodes[nnIndex].conf);
}
/**
* Get the directories where the namenode stores its edits.
*/
public Collection<URI> getNameEditsDirs(int nnIndex) throws IOException {
return FSNamesystem.getNamespaceEditsDirs(nameNodes[nnIndex].conf);
}
public void transitionToActive(int nnIndex) throws IOException,
ServiceFailedException {
getNameNode(nnIndex).getRpcServer().transitionToActive(
new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER_FORCED));
}
public void transitionToStandby(int nnIndex) throws IOException,
ServiceFailedException {
getNameNode(nnIndex).getRpcServer().transitionToStandby(
new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER_FORCED));
}
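  /*
   * A brief failover sketch for an HA topology (a single nameservice with two
   * NameNodes is assumed, e.g. built with MiniDFSNNTopology.simpleHATopology()):
   *
   *   cluster.transitionToActive(0);   // nn0 serves requests
   *   cluster.transitionToStandby(0);  // demote nn0
   *   cluster.transitionToActive(1);   // promote nn1
   */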
public void triggerBlockReports()
throws IOException {
for (DataNode dn : getDataNodes()) {
DataNodeTestUtils.triggerBlockReport(dn);
}
}
public void triggerDeletionReports()
throws IOException {
for (DataNode dn : getDataNodes()) {
DataNodeTestUtils.triggerDeletionReport(dn);
}
}
public void triggerHeartbeats()
throws IOException {
for (DataNode dn : getDataNodes()) {
DataNodeTestUtils.triggerHeartbeat(dn);
}
}
/** Wait until the given namenode gets registration from all the datanodes */
public void waitActive(int nnIndex) throws IOException {
if (nameNodes.length == 0 || nameNodes[nnIndex] == null
|| nameNodes[nnIndex].nameNode == null) {
return;
}
InetSocketAddress addr = nameNodes[nnIndex].nameNode.getServiceRpcAddress();
assert addr.getPort() != 0;
DFSClient client = new DFSClient(addr, conf);
// ensure all datanodes have registered and sent heartbeat to the namenode
while (shouldWait(client.datanodeReport(DatanodeReportType.LIVE), addr)) {
try {
LOG.info("Waiting for cluster to become active");
Thread.sleep(100);
} catch (InterruptedException e) {
}
}
client.close();
}
/**
* Wait until the cluster is active and running.
*/
public void waitActive() throws IOException {
for (int index = 0; index < nameNodes.length; index++) {
int failedCount = 0;
while (true) {
try {
waitActive(index);
break;
} catch (IOException e) {
failedCount++;
// Cached RPC connection to namenode, if any, is expected to fail once
if (failedCount > 1) {
LOG.warn("Tried waitActive() " + failedCount
+ " time(s) and failed, giving up. "
+ StringUtils.stringifyException(e));
throw e;
}
}
}
}
LOG.info("Cluster is active");
}
private synchronized boolean shouldWait(DatanodeInfo[] dnInfo,
InetSocketAddress addr) {
// If a datanode failed to start, then do not wait
for (DataNodeProperties dn : dataNodes) {
// the datanode thread communicating with the namenode should be alive
if (!dn.datanode.isConnectedToNN(addr)) {
LOG.warn("BPOfferService in datanode " + dn.datanode
+ " failed to connect to namenode at " + addr);
return false;
}
}
// Wait for expected number of datanodes to start
if (dnInfo.length != numDataNodes) {
LOG.info("dnInfo.length != numDataNodes");
return true;
}
// if one of the data nodes is not fully started, continue to wait
for (DataNodeProperties dn : dataNodes) {
if (!dn.datanode.isDatanodeFullyStarted()) {
LOG.info("!dn.datanode.isDatanodeFullyStarted()");
return true;
}
}
// make sure all datanodes have sent first heartbeat to namenode,
// using (capacity == 0) as proxy.
for (DatanodeInfo dn : dnInfo) {
if (dn.getCapacity() == 0 || dn.getLastUpdate() <= 0) {
LOG.info("No heartbeat from DataNode: " + dn.toString());
return true;
}
}
// If datanode dataset is not initialized then wait
for (DataNodeProperties dn : dataNodes) {
if (DataNodeTestUtils.getFSDataset(dn.datanode) == null) {
LOG.info("DataNodeTestUtils.getFSDataset(dn.datanode) == null");
return true;
}
}
return false;
}
public void formatDataNodeDirs() throws IOException {
base_dir = new File(determineDfsBaseDir());
data_dir = new File(base_dir, "data");
if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
throw new IOException("Cannot remove data directory: " + data_dir);
}
}
/**
*
   * @param dataNodeIndex - data node whose block report is desired - the index is the same as for getDataNodes()
* @return the block report for the specified data node
*/
public Map<DatanodeStorage, BlockListAsLongs> getBlockReport(String bpid, int dataNodeIndex) {
    if (dataNodeIndex < 0 || dataNodeIndex >= dataNodes.size()) {
throw new IndexOutOfBoundsException();
}
final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
return DataNodeTestUtils.getFSDataset(dn).getBlockReports(bpid);
}
/**
*
* @return block reports from all data nodes
   *         The returned list is indexed in the same order as the list of datanodes returned by getDataNodes()
*/
public List<Map<DatanodeStorage, BlockListAsLongs>> getAllBlockReports(String bpid) {
int numDataNodes = dataNodes.size();
final List<Map<DatanodeStorage, BlockListAsLongs>> result
= new ArrayList<Map<DatanodeStorage, BlockListAsLongs>>(numDataNodes);
for (int i = 0; i < numDataNodes; ++i) {
result.add(getBlockReport(bpid, i));
}
return result;
}
/**
* This method is valid only if the data nodes have simulated data
   * @param dataNodeIndex - data node in which to inject - the index is the same as for getDataNodes()
* @param blocksToInject - the blocks
* @param bpid - (optional) the block pool id to use for injecting blocks.
* If not supplied then it is queried from the in-process NameNode.
   * @throws IOException if the data node does not use SimulatedFSDataset,
   *             or if any of the blocks already exist in the data node
*
*/
public void injectBlocks(int dataNodeIndex,
Iterable<Block> blocksToInject, String bpid) throws IOException {
    if (dataNodeIndex < 0 || dataNodeIndex >= dataNodes.size()) {
throw new IndexOutOfBoundsException();
}
final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
if (!(dataSet instanceof SimulatedFSDataset)) {
throw new IOException("injectBlocks is valid only for SimilatedFSDataset");
}
if (bpid == null) {
bpid = getNamesystem().getBlockPoolId();
}
SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
sdataset.injectBlocks(bpid, blocksToInject);
dataNodes.get(dataNodeIndex).datanode.scheduleAllBlockReport(0);
}
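  // A minimal usage sketch (hypothetical values, added for illustration),
  // assuming the cluster was built with simulated storage via
  // SimulatedFSDataset.setFactory(conf); bpid may be passed as null to use
  // the in-process NameNode's block pool id:
  //
  //   List<Block> fakeBlocks = Arrays.asList(new Block(1000L, 4096L, 1001L));
  //   cluster.injectBlocks(0, fakeBlocks, null);   // inject into datanode 0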
/**
* Multiple-NameNode version of injectBlocks.
*/
public void injectBlocks(int nameNodeIndex, int dataNodeIndex,
Iterable<Block> blocksToInject) throws IOException {
if (dataNodeIndex < 0 || dataNodeIndex >= dataNodes.size()) {
throw new IndexOutOfBoundsException();
}
final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
if (!(dataSet instanceof SimulatedFSDataset)) {
throw new IOException("injectBlocks is valid only for SimilatedFSDataset");
}
String bpid = getNamesystem(nameNodeIndex).getBlockPoolId();
SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
sdataset.injectBlocks(bpid, blocksToInject);
dataNodes.get(dataNodeIndex).datanode.scheduleAllBlockReport(0);
}
/**
* Set the softLimit and hardLimit of client lease periods
*/
public void setLeasePeriod(long soft, long hard) {
NameNodeAdapter.setLeasePeriod(getNamesystem(), soft, hard);
}
public void setLeasePeriod(long soft, long hard, int nnIndex) {
NameNodeAdapter.setLeasePeriod(getNamesystem(nnIndex), soft, hard);
}
public void setWaitSafeMode(boolean wait) {
this.waitSafeMode = wait;
}
/**
* Returns the current set of datanodes
*/
DataNode[] listDataNodes() {
DataNode[] list = new DataNode[dataNodes.size()];
for (int i = 0; i < dataNodes.size(); i++) {
list[i] = dataNodes.get(i).datanode;
}
return list;
}
/**
* Access to the data directory used for Datanodes
*/
public String getDataDirectory() {
return data_dir.getAbsolutePath();
}
/**
* Get the base directory for this MiniDFS instance.
* <p/>
   * Within the MiniDFSCluster class and any subclasses, this method should be
* used instead of {@link #getBaseDirectory()} which doesn't support
* configuration-specific base directories.
* <p/>
* First the Configuration property {@link #HDFS_MINIDFS_BASEDIR} is fetched.
* If non-null, this is returned.
* If this is null, then {@link #getBaseDirectory()} is called.
* @return the base directory for this instance.
*/
protected String determineDfsBaseDir() {
if (conf != null) {
final String dfsdir = conf.get(HDFS_MINIDFS_BASEDIR, null);
if (dfsdir != null) {
return dfsdir;
}
}
return getBaseDirectory();
}
/**
* Get the base directory for any DFS cluster whose configuration does
* not explicitly set it. This is done by retrieving the system property
* {@link #PROP_TEST_BUILD_DATA} (defaulting to "build/test/data" ),
* and returning that directory with a subdir of /dfs.
* @return a directory for use as a miniDFS filesystem.
*/
public static String getBaseDirectory() {
return System.getProperty(PROP_TEST_BUILD_DATA, "build/test/data") + "/dfs/";
}
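  // Illustrative note (added, not in the original source): assuming
  // PROP_TEST_BUILD_DATA is the usual "test.build.data" property, starting the
  // test JVM with -Dtest.build.data=/tmp/minidfs makes getBaseDirectory()
  // return "/tmp/minidfs/dfs/"; without the property it falls back to
  // "build/test/data/dfs/".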
/**
* Get a storage directory for a datanode in this specific instance of
* a MiniCluster.
*
* @param dnIndex datanode index (starts from 0)
* @param dirIndex directory index (0 or 1). Index 0 provides access to the
* first storage directory. Index 1 provides access to the second
* storage directory.
* @return Storage directory
*/
public File getInstanceStorageDir(int dnIndex, int dirIndex) {
return new File(base_dir, getStorageDirPath(dnIndex, dirIndex));
}
/**
* Get a storage directory for a datanode.
* <ol>
   * <li><base directory>/data/data<storagesPerDatanode * dnIndex + 1></li>
   * <li><base directory>/data/data<storagesPerDatanode * dnIndex + 2></li>
* </ol>
*
* @param dnIndex datanode index (starts from 0)
* @param dirIndex directory index.
* @return Storage directory
*/
public File getStorageDir(int dnIndex, int dirIndex) {
return new File(getBaseDirectory(), getStorageDirPath(dnIndex, dirIndex));
}
/**
* Calculate the DN instance-specific path for appending to the base dir
* to determine the location of the storage of a DN instance in the mini cluster
* @param dnIndex datanode index
* @param dirIndex directory index.
* @return storage directory path
*/
private String getStorageDirPath(int dnIndex, int dirIndex) {
return "data/data" + (storagesPerDatanode * dnIndex + 1 + dirIndex);
}
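  // Example of the resulting layout (assuming the default of two storage
  // directories per datanode): datanode 0 uses "data/data1" and "data/data2",
  // datanode 1 uses "data/data3" and "data/data4", i.e. dirIndex selects one
  // of the consecutive directories reserved for that datanode.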
/**
* Get current directory corresponding to the datanode as defined in
   * {@link Storage#STORAGE_DIR_CURRENT}
* @param storageDir the storage directory of a datanode.
* @return the datanode current directory
*/
public static String getDNCurrentDir(File storageDir) {
return storageDir + "/" + Storage.STORAGE_DIR_CURRENT + "/";
}
/**
* Get directory corresponding to block pool directory in the datanode
* @param storageDir the storage directory of a datanode.
* @return the block pool directory
*/
public static String getBPDir(File storageDir, String bpid) {
return getDNCurrentDir(storageDir) + bpid + "/";
}
/**
* Get directory relative to block pool directory in the datanode
* @param storageDir storage directory
* @return current directory in the given storage directory
*/
public static String getBPDir(File storageDir, String bpid, String dirName) {
return getBPDir(storageDir, bpid) + dirName + "/";
}
/**
   * Get RBW (replica-being-written) directory for a block pool
   * @param storageDir storage directory
   * @param bpid Block pool Id
   * @return RBW directory for a block pool
*/
public static File getRbwDir(File storageDir, String bpid) {
return new File(getBPDir(storageDir, bpid, Storage.STORAGE_DIR_CURRENT)
+ DataStorage.STORAGE_DIR_RBW );
}
/**
* Get finalized directory for a block pool
* @param storageDir storage directory
* @param bpid Block pool Id
* @return finalized directory for a block pool
*/
public static File getFinalizedDir(File storageDir, String bpid) {
return new File(getBPDir(storageDir, bpid, Storage.STORAGE_DIR_CURRENT)
+ DataStorage.STORAGE_DIR_FINALIZED );
}
/**
   * Get the data file corresponding to a block
* @param storageDir storage directory
* @param blk the block
* @return data file corresponding to the block
*/
public static File getBlockFile(File storageDir, ExtendedBlock blk) {
return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir,
blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName());
}
/**
   * Get the latest metadata file corresponding to a block
* @param storageDir storage directory
* @param blk the block
* @return metadata file corresponding to the block
*/
public static File getBlockMetadataFile(File storageDir, ExtendedBlock blk) {
return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir,
blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName() + "_" +
blk.getGenerationStamp() + Block.METADATA_EXTENSION);
}
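  // For illustration (hypothetical ids): a block with id 1073741825 and
  // generation stamp 1001 maps to a data file named "blk_1073741825" and a
  // metadata file named "blk_1073741825_1001.meta", both located in the
  // block-id-derived subdirectory under the block pool's finalized directory.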
/**
* Return all block metadata files in given directory (recursive search)
*/
public static List<File> getAllBlockMetadataFiles(File storageDir) {
List<File> results = new ArrayList<File>();
File[] files = storageDir.listFiles();
if (files == null) {
return null;
}
for (File f : files) {
if (f.getName().startsWith(Block.BLOCK_FILE_PREFIX) &&
f.getName().endsWith(Block.METADATA_EXTENSION)) {
results.add(f);
} else if (f.isDirectory()) {
List<File> subdirResults = getAllBlockMetadataFiles(f);
if (subdirResults != null) {
results.addAll(subdirResults);
}
}
}
return results;
}
/**
* Shut down a cluster if it is not null
* @param cluster cluster reference or null
*/
public static void shutdownCluster(MiniDFSCluster cluster) {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Get all files related to a block from all the datanodes
* @param block block for which corresponding files are needed
*/
public File[] getAllBlockFiles(ExtendedBlock block) {
if (dataNodes.size() == 0) return new File[0];
ArrayList<File> list = new ArrayList<File>();
for (int i=0; i < dataNodes.size(); i++) {
File blockFile = getBlockFile(i, block);
if (blockFile != null) {
list.add(blockFile);
}
}
return list.toArray(new File[list.size()]);
}
/**
* Get the block data file for a block from a given datanode
* @param dnIndex Index of the datanode to get block files for
* @param block block for which corresponding files are needed
*/
public File getBlockFile(int dnIndex, ExtendedBlock block) {
// Check for block file in the two storage directories of the datanode
for (int i = 0; i <= 1; i++) {
File storageDir = getStorageDir(dnIndex, i);
File blockFile = getBlockFile(storageDir, block);
if (blockFile.exists()) {
return blockFile;
}
}
return null;
}
/**
* Get the block metadata file for a block from a given datanode
*
* @param dnIndex Index of the datanode to get block files for
* @param block block for which corresponding files are needed
*/
public File getBlockMetadataFile(int dnIndex, ExtendedBlock block) {
// Check for block file in the two storage directories of the datanode
for (int i = 0; i <= 1; i++) {
File storageDir = getStorageDir(dnIndex, i);
File blockMetaFile = getBlockMetadataFile(storageDir, block);
if (blockMetaFile.exists()) {
return blockMetaFile;
}
}
return null;
}
/**
* Throw an exception if the MiniDFSCluster is not started with a single
* namenode
*/
private void checkSingleNameNode() {
if (nameNodes.length != 1) {
throw new IllegalArgumentException("Namenode index is needed");
}
}
/**
* Add a namenode to a federated cluster and start it. Configuration of
* datanodes in the cluster is refreshed to register with the new namenode.
*
* @return newly started namenode
*/
public NameNode addNameNode(Configuration conf, int namenodePort)
throws IOException {
    if (!federation) {
      throw new IOException("cannot add namenode to non-federated cluster");
    }
int nnIndex = nameNodes.length;
int numNameNodes = nameNodes.length + 1;
NameNodeInfo[] newlist = new NameNodeInfo[numNameNodes];
System.arraycopy(nameNodes, 0, newlist, 0, nameNodes.length);
nameNodes = newlist;
String nameserviceId = NAMESERVICE_ID_PREFIX + (nnIndex + 1);
String nameserviceIds = conf.get(DFS_NAMESERVICES);
nameserviceIds += "," + nameserviceId;
conf.set(DFS_NAMESERVICES, nameserviceIds);
String nnId = null;
initNameNodeAddress(conf, nameserviceId,
new NNConf(nnId).setIpcPort(namenodePort));
initNameNodeConf(conf, nameserviceId, nnId, true, true, nnIndex);
createNameNode(nnIndex, conf, numDataNodes, true, null, null,
nameserviceId, nnId);
// Refresh datanodes with the newly started namenode
for (DataNodeProperties dn : dataNodes) {
DataNode datanode = dn.datanode;
datanode.refreshNamenodes(conf);
}
// Wait for new namenode to get registrations from all the datanodes
waitActive(nnIndex);
return nameNodes[nnIndex].nameNode;
}
protected void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
boolean checkDataNodeAddrConfig) throws IOException {
if (setupHostsFile) {
String hostsFile = conf.get(DFS_HOSTS, "").trim();
if (hostsFile.length() == 0) {
throw new IOException("Parameter dfs.hosts is not setup in conf");
}
// Setup datanode in the include file, if it is defined in the conf
String address = "127.0.0.1:" + NetUtils.getFreeSocketPort();
if (checkDataNodeAddrConfig) {
conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, address);
} else {
conf.set(DFS_DATANODE_ADDRESS_KEY, address);
}
addToFile(hostsFile, address);
LOG.info("Adding datanode " + address + " to hosts file " + hostsFile);
} else {
if (checkDataNodeAddrConfig) {
conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
} else {
conf.set(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
}
}
if (checkDataNodeAddrConfig) {
conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
} else {
conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
}
}
private void addToFile(String p, String address) throws IOException {
File f = new File(p);
f.createNewFile();
PrintWriter writer = new PrintWriter(new FileWriter(f, true));
try {
writer.println(address);
} finally {
writer.close();
}
}
}
| 102,759 | 35.595442 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import org.junit.Test;
public class TestSetrepDecreasing {
@Test(timeout=120000)
public void testSetrepDecreasing() throws IOException {
TestSetrepIncreasing.setrep(5, 3, false);
}
}
| 1,063 | 34.466667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationEmpty.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertFalse;
import java.util.ConcurrentModificationException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.junit.Test;
/**
* Test empty file creation.
*/
public class TestFileCreationEmpty {
private boolean isConcurrentModificationException = false;
/**
* This test creates three empty files and lets their leases expire.
* This triggers release of the leases.
* The empty files are supposed to be closed by that
* without causing ConcurrentModificationException.
*/
@Test
public void testLeaseExpireEmptyFiles() throws Exception {
final Thread.UncaughtExceptionHandler oldUEH = Thread.getDefaultUncaughtExceptionHandler();
Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
@Override
public void uncaughtException(Thread t, Throwable e) {
if (e instanceof ConcurrentModificationException) {
LeaseManager.LOG.error("t=" + t, e);
isConcurrentModificationException = true;
}
}
});
System.out.println("testLeaseExpireEmptyFiles start");
final long leasePeriod = 1000;
final int DATANODE_NUM = 3;
final Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
// create cluster
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
try {
cluster.waitActive();
DistributedFileSystem dfs = cluster.getFileSystem();
// create a new file.
TestFileCreation.createFile(dfs, new Path("/foo"), DATANODE_NUM);
TestFileCreation.createFile(dfs, new Path("/foo2"), DATANODE_NUM);
TestFileCreation.createFile(dfs, new Path("/foo3"), DATANODE_NUM);
// set the soft and hard limit to be 1 second so that the
// namenode triggers lease recovery
cluster.setLeasePeriod(leasePeriod, leasePeriod);
// wait for the lease to expire
try {Thread.sleep(5 * leasePeriod);} catch (InterruptedException e) {}
assertFalse(isConcurrentModificationException);
} finally {
Thread.setDefaultUncaughtExceptionHandler(oldUEH);
cluster.shutdown();
}
}
}
| 3,220 | 36.894118 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* This class includes end-to-end tests for snapshot related FsShell and
* DFSAdmin commands.
*/
public class TestSnapshotCommands {
private static Configuration conf;
private static MiniDFSCluster cluster;
private static DistributedFileSystem fs;
@BeforeClass
public static void clusterSetUp() throws IOException {
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
fs = cluster.getFileSystem();
}
@AfterClass
public static void clusterShutdown() throws IOException{
if(fs != null){
fs.close();
}
if(cluster != null){
cluster.shutdown();
}
}
@Before
public void setUp() throws IOException {
fs.mkdirs(new Path("/sub1"));
fs.allowSnapshot(new Path("/sub1"));
fs.mkdirs(new Path("/sub1/sub1sub1"));
fs.mkdirs(new Path("/sub1/sub1sub2"));
}
@After
public void tearDown() throws IOException {
if (fs.exists(new Path("/sub1"))) {
if (fs.exists(new Path("/sub1/.snapshot"))) {
for (FileStatus st : fs.listStatus(new Path("/sub1/.snapshot"))) {
fs.deleteSnapshot(new Path("/sub1"), st.getPath().getName());
}
fs.disallowSnapshot(new Path("/sub1"));
}
fs.delete(new Path("/sub1"), true);
}
}
@Test
public void testAllowSnapshot() throws Exception {
// Idempotent test
DFSTestUtil.DFSAdminRun("-allowSnapshot /sub1", 0, "Allowing snaphot on /sub1 succeeded", conf);
// allow normal dir success
DFSTestUtil.FsShellRun("-mkdir /sub2", conf);
DFSTestUtil.DFSAdminRun("-allowSnapshot /sub2", 0, "Allowing snaphot on /sub2 succeeded", conf);
// allowing a non-existent dir should fail
DFSTestUtil.DFSAdminRun("-allowSnapshot /sub3", -1, null, conf);
}
@Test
public void testCreateSnapshot() throws Exception {
// test createSnapshot
DFSTestUtil.FsShellRun("-createSnapshot /sub1 sn0", 0, "Created snapshot /sub1/.snapshot/sn0", conf);
DFSTestUtil.FsShellRun("-createSnapshot /sub1 sn0", 1, "there is already a snapshot with the same name \"sn0\"", conf);
DFSTestUtil.FsShellRun("-rmr /sub1/sub1sub2", conf);
DFSTestUtil.FsShellRun("-mkdir /sub1/sub1sub3", conf);
DFSTestUtil.FsShellRun("-createSnapshot /sub1 sn1", 0, "Created snapshot /sub1/.snapshot/sn1", conf);
// check snapshot contents
DFSTestUtil.FsShellRun("-ls /sub1", 0, "/sub1/sub1sub1", conf);
DFSTestUtil.FsShellRun("-ls /sub1", 0, "/sub1/sub1sub3", conf);
DFSTestUtil.FsShellRun("-ls /sub1/.snapshot", 0, "/sub1/.snapshot/sn0", conf);
DFSTestUtil.FsShellRun("-ls /sub1/.snapshot", 0, "/sub1/.snapshot/sn1", conf);
DFSTestUtil.FsShellRun("-ls /sub1/.snapshot/sn0", 0, "/sub1/.snapshot/sn0/sub1sub1", conf);
DFSTestUtil.FsShellRun("-ls /sub1/.snapshot/sn0", 0, "/sub1/.snapshot/sn0/sub1sub2", conf);
DFSTestUtil.FsShellRun("-ls /sub1/.snapshot/sn1", 0, "/sub1/.snapshot/sn1/sub1sub1", conf);
DFSTestUtil.FsShellRun("-ls /sub1/.snapshot/sn1", 0, "/sub1/.snapshot/sn1/sub1sub3", conf);
}
@Test
public void testMkdirUsingReservedName() throws Exception {
// test can not create dir with reserved name: .snapshot
DFSTestUtil.FsShellRun("-ls /", conf);
DFSTestUtil.FsShellRun("-mkdir /.snapshot", 1, "File exists", conf);
DFSTestUtil.FsShellRun("-mkdir /sub1/.snapshot", 1, "File exists", conf);
// mkdir -p ignores the reserved name check if the dir already exists
DFSTestUtil.FsShellRun("-mkdir -p /sub1/.snapshot", conf);
DFSTestUtil.FsShellRun("-mkdir -p /sub1/sub1sub1/.snapshot", 1, "mkdir: \".snapshot\" is a reserved name.", conf);
}
@Test
public void testRenameSnapshot() throws Exception {
DFSTestUtil.FsShellRun("-createSnapshot /sub1 sn.orig", conf);
DFSTestUtil.FsShellRun("-renameSnapshot /sub1 sn.orig sn.rename", conf);
DFSTestUtil.FsShellRun("-ls /sub1/.snapshot", 0, "/sub1/.snapshot/sn.rename", conf);
DFSTestUtil.FsShellRun("-ls /sub1/.snapshot/sn.rename", 0, "/sub1/.snapshot/sn.rename/sub1sub1", conf);
DFSTestUtil.FsShellRun("-ls /sub1/.snapshot/sn.rename", 0, "/sub1/.snapshot/sn.rename/sub1sub2", conf);
//try renaming from a non-existing snapshot
DFSTestUtil.FsShellRun("-renameSnapshot /sub1 sn.nonexist sn.rename", 1,
"renameSnapshot: The snapshot sn.nonexist does not exist for directory /sub1", conf);
//try renaming to existing snapshots
DFSTestUtil.FsShellRun("-createSnapshot /sub1 sn.new", conf);
DFSTestUtil.FsShellRun("-renameSnapshot /sub1 sn.new sn.rename", 1,
"renameSnapshot: The snapshot sn.rename already exists for directory /sub1", conf);
DFSTestUtil.FsShellRun("-renameSnapshot /sub1 sn.rename sn.new", 1,
"renameSnapshot: The snapshot sn.new already exists for directory /sub1", conf);
}
@Test
public void testDeleteSnapshot() throws Exception {
DFSTestUtil.FsShellRun("-createSnapshot /sub1 sn1", conf);
DFSTestUtil.FsShellRun("-deleteSnapshot /sub1 sn1", conf);
DFSTestUtil.FsShellRun("-deleteSnapshot /sub1 sn1", 1,
"deleteSnapshot: Cannot delete snapshot sn1 from path /sub1: the snapshot does not exist.", conf);
}
@Test
public void testDisallowSnapshot() throws Exception {
DFSTestUtil.FsShellRun("-createSnapshot /sub1 sn1", conf);
// cannot delete a snapshottable dir that still has snapshots
DFSTestUtil.FsShellRun("-rmr /sub1", 1, "The directory /sub1 cannot be deleted since /sub1 is snapshottable and already has snapshots", conf);
DFSTestUtil.DFSAdminRun("-disallowSnapshot /sub1", -1,
"disallowSnapshot: The directory /sub1 has snapshot(s). Please redo the operation after removing all the snapshots.", conf);
DFSTestUtil.FsShellRun("-deleteSnapshot /sub1 sn1", conf);
DFSTestUtil.DFSAdminRun("-disallowSnapshot /sub1", 0, "Disallowing snaphot on /sub1 succeeded", conf);
// Idempotent test
DFSTestUtil.DFSAdminRun("-disallowSnapshot /sub1", 0, "Disallowing snaphot on /sub1 succeeded", conf);
// now it can be deleted
DFSTestUtil.FsShellRun("-rmr /sub1", conf);
}
}
| 7,158 | 42.387879 | 146 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.junit.Test;
public class TestSetrepIncreasing {
static void setrep(int fromREP, int toREP, boolean simulatedStorage) throws IOException {
Configuration conf = new HdfsConfiguration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "" + fromREP);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(10).build();
FileSystem fs = cluster.getFileSystem();
assertTrue("Not a HDFS: "+fs.getUri(), fs instanceof DistributedFileSystem);
try {
Path root = TestDFSShell.mkdir(fs,
new Path("/test/setrep" + fromREP + "-" + toREP));
Path f = TestDFSShell.writeFile(fs, new Path(root, "foo"));
// Verify setrep for changing replication
{
String[] args = {"-setrep", "-w", "" + toREP, "" + f};
FsShell shell = new FsShell();
shell.setConf(conf);
try {
assertEquals(0, shell.run(args));
} catch (Exception e) {
assertTrue("-setrep " + e, false);
}
}
//get fs again since the old one may be closed
fs = cluster.getFileSystem();
FileStatus file = fs.getFileStatus(f);
long len = file.getLen();
for(BlockLocation locations : fs.getFileBlockLocations(file, 0, len)) {
assertTrue(locations.getHosts().length == toREP);
}
TestDFSShell.show("done setrep waiting: " + root);
} finally {
try {fs.close();} catch (Exception e) {}
cluster.shutdown();
}
}
@Test(timeout=120000)
public void testSetrepIncreasing() throws IOException {
setrep(3, 7, false);
}
@Test(timeout=120000)
public void testSetrepIncreasingSimulatedStorage() throws IOException {
setrep(3, 7, true);
}
}
| 3,226 | 36.091954 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.nio.ByteBuffer;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import org.mockito.Mockito;
/**
* This tests data transfer protocol handling in the Datanode. It sends
* various forms of wrong data and verifies that Datanode handles it well.
*/
public class TestDataTransferProtocol {
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestDataTransferProtocol");
private static final DataChecksum DEFAULT_CHECKSUM =
DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);
DatanodeID datanode;
InetSocketAddress dnAddr;
final ByteArrayOutputStream sendBuf = new ByteArrayOutputStream(128);
final DataOutputStream sendOut = new DataOutputStream(sendBuf);
final Sender sender = new Sender(sendOut);
final ByteArrayOutputStream recvBuf = new ByteArrayOutputStream(128);
final DataOutputStream recvOut = new DataOutputStream(recvBuf);
private void sendRecvData(String testDescription,
boolean eofExpected) throws IOException {
    /* Opens a socket to the datanode and sends the data in sendBuf.
     * If there is data in recvBuf, expects to receive data from the
     * datanode that matches recvBuf.
     * If an exception occurs while receiving, it is rethrown
     * only if eofExpected is false.
*/
Socket sock = null;
try {
if ( testDescription != null ) {
LOG.info("Testing : " + testDescription);
}
LOG.info("Going to write:" +
StringUtils.byteToHexString(sendBuf.toByteArray()));
sock = new Socket();
sock.connect(dnAddr, HdfsServerConstants.READ_TIMEOUT);
sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
OutputStream out = sock.getOutputStream();
      // Buffer for the response we expect back from the datanode
byte[] retBuf = new byte[recvBuf.size()];
DataInputStream in = new DataInputStream(sock.getInputStream());
out.write(sendBuf.toByteArray());
out.flush();
try {
in.readFully(retBuf);
} catch (EOFException eof) {
if ( eofExpected ) {
LOG.info("Got EOF as expected.");
return;
}
throw eof;
}
String received = StringUtils.byteToHexString(retBuf);
String expected = StringUtils.byteToHexString(recvBuf.toByteArray());
LOG.info("Received: " + received);
LOG.info("Expected: " + expected);
if (eofExpected) {
throw new IOException("Did not recieve IOException when an exception " +
"is expected while reading from " + datanode);
}
assertEquals(expected, received);
} finally {
IOUtils.closeSocket(sock);
}
}
void createFile(FileSystem fs, Path path, int fileLen) throws IOException {
byte [] arr = new byte[fileLen];
FSDataOutputStream out = fs.create(path);
out.write(arr);
out.close();
}
void readFile(FileSystem fs, Path path, int fileLen) throws IOException {
byte [] arr = new byte[fileLen];
FSDataInputStream in = fs.open(path);
in.readFully(arr);
}
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
PacketHeader hdr = new PacketHeader(
8, // size of packet
block.getNumBytes(), // OffsetInBlock
100, // sequencenumber
true, // lastPacketInBlock
0, // chunk length
false); // sync block
hdr.write(sendOut);
sendOut.writeInt(0); // zero checksum
//ok finally write a block with 0 len
sendResponse(Status.SUCCESS, "", null, recvOut);
new PipelineAck(100, new int[] {PipelineAck.combineHeader
(PipelineAck.ECN.DISABLED, Status.SUCCESS)}).write
(recvOut);
sendRecvData(description, false);
}
private void sendResponse(Status status, String firstBadLink,
String message,
DataOutputStream out)
throws IOException {
Builder builder = BlockOpResponseProto.newBuilder().setStatus(status);
if (firstBadLink != null) {
builder.setFirstBadLink(firstBadLink);
}
if (message != null) {
builder.setMessage(message);
}
builder.build()
.writeDelimitedTo(out);
}
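  // Note (added for clarity): writeDelimitedTo() prefixes the serialized
  // BlockOpResponseProto with its varint-encoded length, which matches the
  // framing the datanode uses for its replies, so the bytes accumulated in
  // recvBuf can be compared verbatim against what the datanode sends back.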
  private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long newGS,
      String description, boolean eofExpected) throws IOException {
sendBuf.reset();
recvBuf.reset();
writeBlock(block, stage, newGS, DEFAULT_CHECKSUM);
    if (eofExpected) {
sendResponse(Status.ERROR, null, null, recvOut);
sendRecvData(description, true);
} else if (stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
//ok finally write a block with 0 len
sendResponse(Status.SUCCESS, "", null, recvOut);
sendRecvData(description, false);
} else {
writeZeroLengthPacket(block, description);
}
}
@Test
public void testOpWrite() throws IOException {
int numDataNodes = 1;
final long BLOCK_ID_FUDGE = 128;
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
try {
cluster.waitActive();
String poolId = cluster.getNamesystem().getBlockPoolId();
datanode = DataNodeTestUtils.getDNRegistrationForBP(
cluster.getDataNodes().get(0), poolId);
dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
FileSystem fileSys = cluster.getFileSystem();
/* Test writing to finalized replicas */
Path file = new Path("dataprotocol.dat");
DFSTestUtil.createFile(fileSys, file, 1L, (short)numDataNodes, 0L);
// get the first blockid for the file
ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
// test PIPELINE_SETUP_CREATE on a finalized block
testWrite(firstBlock, BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L,
"Cannot create an existing block", true);
// test PIPELINE_DATA_STREAMING on a finalized block
testWrite(firstBlock, BlockConstructionStage.DATA_STREAMING, 0L,
"Unexpected stage", true);
// test PIPELINE_SETUP_STREAMING_RECOVERY on an existing block
long newGS = firstBlock.getGenerationStamp() + 1;
testWrite(firstBlock,
BlockConstructionStage.PIPELINE_SETUP_STREAMING_RECOVERY,
newGS, "Cannot recover data streaming to a finalized replica", true);
// test PIPELINE_SETUP_APPEND on an existing block
newGS = firstBlock.getGenerationStamp() + 1;
testWrite(firstBlock,
BlockConstructionStage.PIPELINE_SETUP_APPEND,
newGS, "Append to a finalized replica", false);
firstBlock.setGenerationStamp(newGS);
// test PIPELINE_SETUP_APPEND_RECOVERY on an existing block
file = new Path("dataprotocol1.dat");
DFSTestUtil.createFile(fileSys, file, 1L, (short)numDataNodes, 0L);
firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
newGS = firstBlock.getGenerationStamp() + 1;
testWrite(firstBlock,
BlockConstructionStage.PIPELINE_SETUP_APPEND_RECOVERY, newGS,
"Recover appending to a finalized replica", false);
// test PIPELINE_CLOSE_RECOVERY on an existing block
file = new Path("dataprotocol2.dat");
DFSTestUtil.createFile(fileSys, file, 1L, (short)numDataNodes, 0L);
firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
newGS = firstBlock.getGenerationStamp() + 1;
testWrite(firstBlock,
BlockConstructionStage.PIPELINE_CLOSE_RECOVERY, newGS,
"Recover failed close to a finalized replica", false);
firstBlock.setGenerationStamp(newGS);
// Test writing to a new block. Don't choose the next sequential
// block ID to avoid conflicting with IDs chosen by the NN.
long newBlockId = firstBlock.getBlockId() + BLOCK_ID_FUDGE;
ExtendedBlock newBlock = new ExtendedBlock(firstBlock.getBlockPoolId(),
newBlockId, 0, firstBlock.getGenerationStamp());
// test PIPELINE_SETUP_CREATE on a new block
testWrite(newBlock, BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L,
"Create a new block", false);
// test PIPELINE_SETUP_STREAMING_RECOVERY on a new block
newGS = newBlock.getGenerationStamp() + 1;
newBlock.setBlockId(newBlock.getBlockId()+1);
testWrite(newBlock,
BlockConstructionStage.PIPELINE_SETUP_STREAMING_RECOVERY, newGS,
"Recover a new block", true);
// test PIPELINE_SETUP_APPEND on a new block
newGS = newBlock.getGenerationStamp() + 1;
testWrite(newBlock,
BlockConstructionStage.PIPELINE_SETUP_APPEND, newGS,
"Cannot append to a new block", true);
// test PIPELINE_SETUP_APPEND_RECOVERY on a new block
newBlock.setBlockId(newBlock.getBlockId()+1);
newGS = newBlock.getGenerationStamp() + 1;
testWrite(newBlock,
BlockConstructionStage.PIPELINE_SETUP_APPEND_RECOVERY, newGS,
"Cannot append to a new block", true);
/* Test writing to RBW replicas */
Path file1 = new Path("dataprotocol1.dat");
DFSTestUtil.createFile(fileSys, file1, 1L, (short)numDataNodes, 0L);
DFSOutputStream out = (DFSOutputStream)(fileSys.append(file1).
getWrappedStream());
out.write(1);
out.hflush();
FSDataInputStream in = fileSys.open(file1);
firstBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
firstBlock.setNumBytes(2L);
try {
// test PIPELINE_SETUP_CREATE on a RBW block
testWrite(firstBlock, BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L,
"Cannot create a RBW block", true);
// test PIPELINE_SETUP_APPEND on an existing block
newGS = firstBlock.getGenerationStamp() + 1;
testWrite(firstBlock, BlockConstructionStage.PIPELINE_SETUP_APPEND,
newGS, "Cannot append to a RBW replica", true);
// test PIPELINE_SETUP_APPEND_RECOVERY on an existing RBW replica
testWrite(firstBlock,
BlockConstructionStage.PIPELINE_SETUP_APPEND_RECOVERY,
newGS, "Recover append to a RBW replica", false);
firstBlock.setGenerationStamp(newGS);
// test PIPELINE_SETUP_STREAMING_RECOVERY on a RBW block
file = new Path("dataprotocol2.dat");
DFSTestUtil.createFile(fileSys, file, 1L, (short)numDataNodes, 0L);
out = (DFSOutputStream)(fileSys.append(file).
getWrappedStream());
out.write(1);
out.hflush();
in = fileSys.open(file);
firstBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
firstBlock.setNumBytes(2L);
newGS = firstBlock.getGenerationStamp() + 1;
testWrite(firstBlock,
BlockConstructionStage.PIPELINE_SETUP_STREAMING_RECOVERY,
newGS, "Recover a RBW replica", false);
} finally {
IOUtils.closeStream(in);
IOUtils.closeStream(out);
}
} finally {
cluster.shutdown();
}
}
@Test
public void testDataTransferProtocol() throws IOException {
Random random = new Random();
int oneMil = 1024*1024;
Path file = new Path("dataprotocol.dat");
int numDataNodes = 1;
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
try {
cluster.waitActive();
datanode = cluster.getFileSystem().getDataNodeStats(DatanodeReportType.LIVE)[0];
dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
FileSystem fileSys = cluster.getFileSystem();
int fileLen = Math.min(conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096), 4096);
createFile(fileSys, file, fileLen);
// get the first blockid for the file
final ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
final String poolId = firstBlock.getBlockPoolId();
long newBlockId = firstBlock.getBlockId() + 1;
recvBuf.reset();
sendBuf.reset();
// bad version
recvOut.writeShort((short)(DataTransferProtocol.DATA_TRANSFER_VERSION-1));
sendOut.writeShort((short)(DataTransferProtocol.DATA_TRANSFER_VERSION-1));
sendRecvData("Wrong Version", true);
// bad ops
sendBuf.reset();
sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
sendOut.writeByte(Op.WRITE_BLOCK.code - 1);
sendRecvData("Wrong Op Code", true);
/* Test OP_WRITE_BLOCK */
sendBuf.reset();
DataChecksum badChecksum = Mockito.spy(DEFAULT_CHECKSUM);
Mockito.doReturn(-1).when(badChecksum).getBytesPerChecksum();
writeBlock(poolId, newBlockId, badChecksum);
recvBuf.reset();
sendResponse(Status.ERROR, null, null, recvOut);
sendRecvData("wrong bytesPerChecksum while writing", true);
sendBuf.reset();
recvBuf.reset();
writeBlock(poolId, ++newBlockId, DEFAULT_CHECKSUM);
PacketHeader hdr = new PacketHeader(
4, // size of packet
0, // offset in block,
100, // seqno
false, // last packet
-1 - random.nextInt(oneMil), // bad datalen
false);
hdr.write(sendOut);
sendResponse(Status.SUCCESS, "", null, recvOut);
new PipelineAck(100, new int[] {PipelineAck.combineHeader
(PipelineAck.ECN.DISABLED, Status.ERROR)}).write(recvOut);
sendRecvData("negative DATA_CHUNK len while writing block " + newBlockId,
true);
// test for writing a valid zero size block
sendBuf.reset();
recvBuf.reset();
writeBlock(poolId, ++newBlockId, DEFAULT_CHECKSUM);
hdr = new PacketHeader(
8, // size of packet
0, // OffsetInBlock
100, // sequencenumber
true, // lastPacketInBlock
0, // chunk length
false);
hdr.write(sendOut);
sendOut.writeInt(0); // zero checksum
sendOut.flush();
//ok finally write a block with 0 len
sendResponse(Status.SUCCESS, "", null, recvOut);
new PipelineAck(100, new int[] {PipelineAck.combineHeader
(PipelineAck.ECN.DISABLED, Status.SUCCESS)}).write(recvOut);
sendRecvData("Writing a zero len block blockid " + newBlockId, false);
/* Test OP_READ_BLOCK */
String bpid = cluster.getNamesystem().getBlockPoolId();
ExtendedBlock blk = new ExtendedBlock(bpid, firstBlock.getLocalBlock());
long blkid = blk.getBlockId();
// bad block id
sendBuf.reset();
recvBuf.reset();
blk.setBlockId(blkid-1);
sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
0L, fileLen, true, CachingStrategy.newDefaultStrategy());
sendRecvData("Wrong block ID " + newBlockId + " for read", false);
// negative block start offset -1L
sendBuf.reset();
blk.setBlockId(blkid);
sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-1L, fileLen, true, CachingStrategy.newDefaultStrategy());
sendRecvData("Negative start-offset for read for block " +
firstBlock.getBlockId(), false);
// bad block start offset
sendBuf.reset();
sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
fileLen, fileLen, true, CachingStrategy.newDefaultStrategy());
sendRecvData("Wrong start-offset for reading block " +
firstBlock.getBlockId(), false);
// negative length is ok. Datanode assumes we want to read the whole block.
recvBuf.reset();
BlockOpResponseProto.newBuilder()
.setStatus(Status.SUCCESS)
.setReadOpChecksumInfo(ReadOpChecksumInfoProto.newBuilder()
.setChecksum(DataTransferProtoUtil.toProto(DEFAULT_CHECKSUM))
.setChunkOffset(0L))
.build()
.writeDelimitedTo(recvOut);
sendBuf.reset();
sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
0L, -1L-random.nextInt(oneMil), true,
CachingStrategy.newDefaultStrategy());
sendRecvData("Negative length for reading block " +
firstBlock.getBlockId(), false);
// length is more than size of block.
recvBuf.reset();
sendResponse(Status.ERROR, null,
"opReadBlock " + firstBlock +
" received exception java.io.IOException: " +
"Offset 0 and length 4097 don't match block " + firstBlock + " ( blockLen 4096 )",
recvOut);
sendBuf.reset();
sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
0L, fileLen+1, true, CachingStrategy.newDefaultStrategy());
sendRecvData("Wrong length for reading block " +
firstBlock.getBlockId(), false);
//At the end of all this, read the file to make sure that succeeds finally.
sendBuf.reset();
sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
0L, fileLen, true, CachingStrategy.newDefaultStrategy());
readFile(fileSys, file, fileLen);
} finally {
cluster.shutdown();
}
}
@Test
public void testPacketHeader() throws IOException {
PacketHeader hdr = new PacketHeader(
4, // size of packet
1024, // OffsetInBlock
100, // sequencenumber
false, // lastPacketInBlock
4096, // chunk length
false);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
hdr.write(new DataOutputStream(baos));
// Read back using DataInput
PacketHeader readBack = new PacketHeader();
ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
readBack.readFields(new DataInputStream(bais));
assertEquals(hdr, readBack);
// Read back using ByteBuffer
readBack = new PacketHeader();
readBack.readFields(ByteBuffer.wrap(baos.toByteArray()));
assertEquals(hdr, readBack);
assertTrue(hdr.sanityCheck(99));
assertFalse(hdr.sanityCheck(100));
}
@Test
public void TestPipeLineAckCompatibility() throws IOException {
DataTransferProtos.PipelineAckProto proto = DataTransferProtos
.PipelineAckProto.newBuilder()
.setSeqno(0)
.addReply(Status.CHECKSUM_OK)
.build();
DataTransferProtos.PipelineAckProto newProto = DataTransferProtos
.PipelineAckProto.newBuilder().mergeFrom(proto)
.addFlag(PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED,
Status.CHECKSUM_OK))
.build();
ByteArrayOutputStream oldAckBytes = new ByteArrayOutputStream();
proto.writeDelimitedTo(oldAckBytes);
PipelineAck oldAck = new PipelineAck();
oldAck.readFields(new ByteArrayInputStream(oldAckBytes.toByteArray()));
assertEquals(PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status
.CHECKSUM_OK), oldAck.getHeaderFlag(0));
PipelineAck newAck = new PipelineAck();
ByteArrayOutputStream newAckBytes = new ByteArrayOutputStream();
newProto.writeDelimitedTo(newAckBytes);
newAck.readFields(new ByteArrayInputStream(newAckBytes.toByteArray()));
assertEquals(PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED, Status
.CHECKSUM_OK), newAck.getHeaderFlag(0));
}
void writeBlock(String poolId, long blockId, DataChecksum checksum) throws IOException {
writeBlock(new ExtendedBlock(poolId, blockId),
BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L, checksum);
}
void writeBlock(ExtendedBlock block, BlockConstructionStage stage,
long newGS, DataChecksum checksum) throws IOException {
sender.writeBlock(block, StorageType.DEFAULT,
BlockTokenSecretManager.DUMMY_TOKEN, "cl",
new DatanodeInfo[1], new StorageType[1], null, stage,
0, block.getNumBytes(), block.getNumBytes(), newGS,
checksum, CachingStrategy.newDefaultStrategy(), false, false, null);
}
}
| 23,099 | 39.455342 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintStream;
import java.io.RandomAccessFile;
import java.util.Random;
import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
 * Class is used to test the client reporting a corrupted block replica to the
 * name node. The reporting policy is: if a block has more than one replica
 * and all replicas are corrupted, the client does not report (since it cannot
 * tell which replica is at fault). If only some of the replicas are corrupted,
 * the client reports those corrupted replicas. If there is only one replica,
 * the client always reports it when it is found to be corrupted.
*/
public class TestClientReportBadBlock {
private static final Log LOG = LogFactory
.getLog(TestClientReportBadBlock.class);
static final long BLOCK_SIZE = 64 * 1024;
private static int buffersize;
private static MiniDFSCluster cluster;
private static DistributedFileSystem dfs;
private static final int numDataNodes = 3;
private static final Configuration conf = new HdfsConfiguration();
Random rand = new Random();
@Before
public void startUpCluster() throws IOException {
// disable block scanner
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
// Set short retry timeouts so this test runs faster
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
.build();
cluster.waitActive();
dfs = cluster.getFileSystem();
buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
}
@After
public void shutDownCluster() throws IOException {
dfs.close();
cluster.shutdown();
}
/*
* This test creates a file with one block replica. Corrupt the block. Make
* DFSClient read the corrupted file. Corrupted block is expected to be
* reported to name node.
*/
@Test
public void testOneBlockReplica() throws Exception {
final short repl = 1;
final int corruptBlockNumber = 1;
for (int i = 0; i < 2; i++) {
// create a file
String fileName = "/tmp/testClientReportBadBlock/OneBlockReplica" + i;
Path filePath = new Path(fileName);
createAFileWithCorruptedBlockReplicas(filePath, repl, corruptBlockNumber);
if (i == 0) {
dfsClientReadFile(filePath);
} else {
dfsClientReadFileFromPosition(filePath);
}
// the only block replica is corrupted. The LocatedBlock should be marked
      // as corrupted. But the corrupted replica is still expected to be
      // returned by Namenode#getBlockLocations() since all (here, the only)
      // replicas are corrupted.
int expectedReplicaCount = 1;
verifyCorruptedBlockCount(filePath, expectedReplicaCount);
verifyFirstBlockCorrupted(filePath, true);
verifyFsckBlockCorrupted();
testFsckListCorruptFilesBlocks(filePath, -1);
}
}
/**
* This test creates a file with three block replicas. Corrupt all of the
* replicas. Make dfs client read the file. No block corruption should be
* reported.
*/
@Test
public void testCorruptAllOfThreeReplicas() throws Exception {
final short repl = 3;
final int corruptBlockNumber = 3;
for (int i = 0; i < 2; i++) {
// create a file
String fileName = "/tmp/testClientReportBadBlock/testCorruptAllReplicas"
+ i;
Path filePath = new Path(fileName);
createAFileWithCorruptedBlockReplicas(filePath, repl, corruptBlockNumber);
// ask dfs client to read the file
if (i == 0) {
dfsClientReadFile(filePath);
} else {
dfsClientReadFileFromPosition(filePath);
}
      // As all replicas are corrupted, we expect the DFSClient NOT to report
      // corrupted replicas to the name node.
int expectedReplicasReturned = repl;
verifyCorruptedBlockCount(filePath, expectedReplicasReturned);
// LocatedBlock should not have the block marked as corrupted.
verifyFirstBlockCorrupted(filePath, false);
verifyFsckHealth("");
testFsckListCorruptFilesBlocks(filePath, 0);
}
}
/**
* This test creates a file with three block replicas. Corrupt two of the
* replicas. Make dfs client read the file. The corrupted blocks with their
* owner data nodes should be reported to the name node.
*/
@Test
public void testCorruptTwoOutOfThreeReplicas() throws Exception {
final short repl = 3;
final int corruptBlocReplicas = 2;
for (int i = 0; i < 2; i++) {
String fileName =
"/tmp/testClientReportBadBlock/CorruptTwoOutOfThreeReplicas"+ i;
Path filePath = new Path(fileName);
createAFileWithCorruptedBlockReplicas(filePath, repl, corruptBlocReplicas);
int replicaCount = 0;
/*
       * The order of data nodes in the LocatedBlock returned by the name node
       * is sorted by NetworkTopology#pseudoSortByDistance. In the current
       * MiniDFSCluster the sorting is effectively random: the DFS client and
       * the simulated data nodes are considered to be neither on the same
       * host nor on the same rack. Therefore, even though we corrupted the
       * first two block replicas in order, there is no guarantee which block
       * replicas (good or bad) will be returned first when the DFSClient
       * reads. So we re-read the file until the expected number of replicas
       * is returned.
*/
while (replicaCount != repl - corruptBlocReplicas) {
if (i == 0) {
dfsClientReadFile(filePath);
} else {
dfsClientReadFileFromPosition(filePath);
}
LocatedBlocks blocks = dfs.dfs.getNamenode().
getBlockLocations(filePath.toString(), 0, Long.MAX_VALUE);
replicaCount = blocks.get(0).getLocations().length;
}
verifyFirstBlockCorrupted(filePath, false);
int expectedReplicaCount = repl-corruptBlocReplicas;
verifyCorruptedBlockCount(filePath, expectedReplicaCount);
verifyFsckHealth("Target Replicas is 3 but found 1 live replica");
testFsckListCorruptFilesBlocks(filePath, 0);
}
}
/**
* Create a file with one block and corrupt some/all of the block replicas.
*/
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
int corruptBlockCount) throws IOException, AccessControlException,
FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
DFSTestUtil.waitReplication(dfs, filePath, repl);
// Locate the file blocks by asking name node
final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
.getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
// The file only has one block
LocatedBlock lblock = locatedblocks.get(0);
DatanodeInfo[] datanodeinfos = lblock.getLocations();
ExtendedBlock block = lblock.getBlock();
// corrupt some /all of the block replicas
for (int i = 0; i < corruptBlockCount; i++) {
DatanodeInfo dninfo = datanodeinfos[i];
final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
corruptBlock(block, dn);
LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
+ dninfo);
}
}
/**
* Verify the first block of the file is corrupted (for all its replica).
*/
private void verifyFirstBlockCorrupted(Path filePath, boolean isCorrupted)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException {
final LocatedBlocks locatedBlocks = dfs.dfs.getNamenode()
.getBlockLocations(filePath.toUri().getPath(), 0, Long.MAX_VALUE);
final LocatedBlock firstLocatedBlock = locatedBlocks.get(0);
Assert.assertEquals(isCorrupted, firstLocatedBlock.isCorrupt());
}
/**
* Verify the number of corrupted block replicas by fetching the block
* location from name node.
*/
private void verifyCorruptedBlockCount(Path filePath, int expectedReplicas)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException {
final LocatedBlocks lBlocks = dfs.dfs.getNamenode().getBlockLocations(
filePath.toUri().getPath(), 0, Long.MAX_VALUE);
// we expect only the first block of the file is used for this test
LocatedBlock firstLocatedBlock = lBlocks.get(0);
Assert.assertEquals(expectedReplicas,
firstLocatedBlock.getLocations().length);
}
/**
* Ask dfs client to read the file
*/
private void dfsClientReadFile(Path corruptedFile) throws IOException,
UnresolvedLinkException {
DFSInputStream in = dfs.dfs.open(corruptedFile.toUri().getPath());
byte[] buf = new byte[buffersize];
int nRead = 0; // total number of bytes read
try {
do {
nRead = in.read(buf, 0, buf.length);
} while (nRead > 0);
} catch (ChecksumException ce) {
// caught ChecksumException if all replicas are bad, ignore and continue.
LOG.debug("DfsClientReadFile caught ChecksumException.");
} catch (BlockMissingException bme) {
// caught BlockMissingException, ignore.
LOG.debug("DfsClientReadFile caught BlockMissingException.");
}
}
/**
* DFS client read bytes starting from the specified position.
*/
private void dfsClientReadFileFromPosition(Path corruptedFile)
throws UnresolvedLinkException, IOException {
DFSInputStream in = dfs.dfs.open(corruptedFile.toUri().getPath());
byte[] buf = new byte[buffersize];
int startPosition = 2;
int nRead = 0; // total number of bytes read
try {
do {
nRead = in.read(startPosition, buf, 0, buf.length);
startPosition += buf.length;
} while (nRead > 0);
} catch (BlockMissingException bme) {
LOG.debug("DfsClientReadFile caught BlockMissingException.");
}
}
/**
   * Corrupt a block on a data node. Replace the block file content with the
   * byte sequence 0, 1, ..., BLOCK_SIZE - 1.
*
* @param block
* the ExtendedBlock to be corrupted
* @param dn
* the data node where the block needs to be corrupted
* @throws FileNotFoundException
* @throws IOException
*/
private static void corruptBlock(final ExtendedBlock block, final DataNode dn)
throws FileNotFoundException, IOException {
final File f = DataNodeTestUtils.getBlockFile(
dn, block.getBlockPoolId(), block.getLocalBlock());
final RandomAccessFile raFile = new RandomAccessFile(f, "rw");
final byte[] bytes = new byte[(int) BLOCK_SIZE];
for (int i = 0; i < BLOCK_SIZE; i++) {
bytes[i] = (byte) (i);
}
raFile.write(bytes);
raFile.close();
}
private static void verifyFsckHealth(String expected) throws Exception {
// Fsck health has error code 0.
// Make sure filesystem is in healthy state
String outStr = runFsck(conf, 0, true, "/");
LOG.info(outStr);
Assert.assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
if (!expected.equals("")) {
Assert.assertTrue(outStr.contains(expected));
}
}
private static void verifyFsckBlockCorrupted() throws Exception {
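    // With a corrupted block present, fsck is expected to exit with error code 1
    // and report the filesystem as CORRUPT.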
String outStr = runFsck(conf, 1, true, "/");
LOG.info(outStr);
Assert.assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
}
private static void testFsckListCorruptFilesBlocks(Path filePath, int errorCode) throws Exception{
String outStr = runFsck(conf, errorCode, true, filePath.toString(), "-list-corruptfileblocks");
LOG.info("fsck -list-corruptfileblocks out: " + outStr);
if (errorCode != 0) {
Assert.assertTrue(outStr.contains("CORRUPT files"));
}
}
static String runFsck(Configuration conf, int expectedErrCode,
boolean checkErrorCode, String... path) throws Exception {
ByteArrayOutputStream bStream = new ByteArrayOutputStream();
PrintStream out = new PrintStream(bStream, true);
int errCode = ToolRunner.run(new DFSck(conf, out), path);
    if (checkErrorCode) {
      Assert.assertEquals(expectedErrCode, errCode);
    }
return bStream.toString();
}
}
| 14,178 | 39.053672 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.util.ArrayList;
import java.util.List;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;
import org.apache.log4j.spi.ThrowableInformation;
/**
* Used to verify that certain exceptions or messages are present in log output.
*/
public class LogVerificationAppender extends AppenderSkeleton {
private final List<LoggingEvent> log = new ArrayList<LoggingEvent>();
@Override
public boolean requiresLayout() {
return false;
}
@Override
protected void append(final LoggingEvent loggingEvent) {
log.add(loggingEvent);
}
@Override
public void close() {
}
public List<LoggingEvent> getLog() {
return new ArrayList<LoggingEvent>(log);
}
public int countExceptionsWithMessage(final String text) {
int count = 0;
for (LoggingEvent e: getLog()) {
ThrowableInformation t = e.getThrowableInformation();
if (t != null) {
String m = t.getThrowable().getMessage();
if (m.contains(text)) {
count++;
}
}
}
return count;
}
public int countLinesWithMessage(final String text) {
int count = 0;
for (LoggingEvent e: getLog()) {
String msg = e.getRenderedMessage();
if (msg != null && msg.contains(text)) {
count++;
}
}
return count;
}
}
| 2,164 | 27.486842 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
public class TestDatanodeLayoutUpgrade {
private static final String HADOOP_DATANODE_DIR_TXT =
"hadoop-datanode-dir.txt";
private static final String HADOOP24_DATANODE = "hadoop-24-datanode-dir.tgz";
@Test
// Upgrade from LDir-based layout to block ID-based layout -- change described
// in HDFS-6482
public void testUpgradeToIdBasedLayout() throws IOException {
TestDFSUpgradeFromImage upgrade = new TestDFSUpgradeFromImage();
upgrade.unpackStorage(HADOOP24_DATANODE, HADOOP_DATANODE_DIR_TXT);
Configuration conf = new Configuration(TestDFSUpgradeFromImage.upgradeConf);
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
new File(System.getProperty("test.build.data"),
"dfs" + File.separator + "data").toURI().toString());
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
new File(System.getProperty("test.build.data"),
"dfs" + File.separator + "name").toURI().toString());
upgrade.upgradeAndVerify(new MiniDFSCluster.Builder(conf).numDataNodes(1)
.manageDataDfsDirs(false).manageNameDfsDirs(false), null);
}
}
| 2,062 | 41.102041 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Random;
import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.web.HftpFileSystem;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* This class tests the FileStatus API.
*/
public class TestFileStatus {
{
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)FileSystem.LOG).getLogger().setLevel(Level.ALL);
}
static final long seed = 0xDEADBEEFL;
static final int blockSize = 8192;
static final int fileSize = 16384;
private static Configuration conf;
private static MiniDFSCluster cluster;
private static FileSystem fs;
private static FileContext fc;
private static HftpFileSystem hftpfs;
private static DFSClient dfsClient;
private static Path file1;
@BeforeClass
public static void testSetUp() throws Exception {
conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 2);
cluster = new MiniDFSCluster.Builder(conf).build();
fs = cluster.getFileSystem();
fc = FileContext.getFileContext(cluster.getURI(0), conf);
hftpfs = cluster.getHftpFileSystem(0);
dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
file1 = new Path("filestatus.dat");
writeFile(fs, file1, 1, fileSize, blockSize);
}
@AfterClass
public static void testTearDown() throws Exception {
fs.close();
cluster.shutdown();
}
private static void writeFile(FileSystem fileSys, Path name, int repl,
int fileSize, int blockSize) throws IOException {
    // Create and write a file of the given size with the given replication and block size
FSDataOutputStream stm = fileSys.create(name, true,
DFSUtil.getIoFileBufferSize(conf), (short)repl, (long)blockSize);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
stm.close();
}
private void checkFile(FileSystem fileSys, Path name, int repl)
throws IOException, InterruptedException, TimeoutException {
DFSTestUtil.waitReplication(fileSys, name, (short) repl);
}
/** Test calling getFileInfo directly on the client */
@Test
public void testGetFileInfo() throws IOException {
// Check that / exists
Path path = new Path("/");
assertTrue("/ should be a directory",
fs.getFileStatus(path).isDirectory());
// Make sure getFileInfo returns null for files which do not exist
HdfsFileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
assertEquals("Non-existant file should result in null", null, fileInfo);
Path path1 = new Path("/name1");
Path path2 = new Path("/name1/name2");
assertTrue(fs.mkdirs(path1));
FSDataOutputStream out = fs.create(path2, false);
out.close();
fileInfo = dfsClient.getFileInfo(path1.toString());
assertEquals(1, fileInfo.getChildrenNum());
fileInfo = dfsClient.getFileInfo(path2.toString());
assertEquals(0, fileInfo.getChildrenNum());
// Test getFileInfo throws the right exception given a non-absolute path.
try {
dfsClient.getFileInfo("non-absolute");
fail("getFileInfo for a non-absolute path did not throw IOException");
} catch (RemoteException re) {
assertTrue("Wrong exception for invalid file name",
re.toString().contains("Invalid file name"));
}
}
/** Test the FileStatus obtained calling getFileStatus on a file */
@Test
public void testGetFileStatusOnFile() throws Exception {
checkFile(fs, file1, 1);
// test getFileStatus on a file
FileStatus status = fs.getFileStatus(file1);
assertFalse(file1 + " should be a file", status.isDirectory());
assertEquals(blockSize, status.getBlockSize());
assertEquals(1, status.getReplication());
assertEquals(fileSize, status.getLen());
assertEquals(file1.makeQualified(fs.getUri(),
fs.getWorkingDirectory()).toString(),
status.getPath().toString());
}
  /** Test the FileStatus obtained by calling listStatus on a file */
@Test
public void testListStatusOnFile() throws IOException {
FileStatus[] stats = fs.listStatus(file1);
assertEquals(1, stats.length);
FileStatus status = stats[0];
assertFalse(file1 + " should be a file", status.isDirectory());
assertEquals(blockSize, status.getBlockSize());
assertEquals(1, status.getReplication());
assertEquals(fileSize, status.getLen());
assertEquals(file1.makeQualified(fs.getUri(),
fs.getWorkingDirectory()).toString(),
status.getPath().toString());
RemoteIterator<FileStatus> itor = fc.listStatus(file1);
status = itor.next();
assertEquals(stats[0], status);
assertFalse(file1 + " should be a file", status.isDirectory());
}
  /** Test getting a FileStatus object using a non-existent path */
@Test
public void testGetFileStatusOnNonExistantFileDir() throws IOException {
Path dir = new Path("/test/mkdirs");
try {
fs.listStatus(dir);
fail("listStatus of non-existent path should fail");
} catch (FileNotFoundException fe) {
assertEquals("File " + dir + " does not exist.",fe.getMessage());
}
try {
fc.listStatus(dir);
fail("listStatus of non-existent path should fail");
} catch (FileNotFoundException fe) {
assertEquals("File " + dir + " does not exist.", fe.getMessage());
}
try {
fs.getFileStatus(dir);
fail("getFileStatus of non-existent path should fail");
} catch (FileNotFoundException fe) {
assertTrue("Exception doesn't indicate non-existant path",
fe.getMessage().startsWith("File does not exist"));
}
}
/** Test FileStatus objects obtained from a directory */
@Test
public void testGetFileStatusOnDir() throws Exception {
// Create the directory
Path dir = new Path("/test/mkdirs");
assertTrue("mkdir failed", fs.mkdirs(dir));
assertTrue("mkdir failed", fs.exists(dir));
// test getFileStatus on an empty directory
FileStatus status = fs.getFileStatus(dir);
assertTrue(dir + " should be a directory", status.isDirectory());
assertTrue(dir + " should be zero size ", status.getLen() == 0);
assertEquals(dir.makeQualified(fs.getUri(),
fs.getWorkingDirectory()).toString(),
status.getPath().toString());
// test listStatus on an empty directory
FileStatus[] stats = fs.listStatus(dir);
assertEquals(dir + " should be empty", 0, stats.length);
assertEquals(dir + " should be zero size ",
0, fs.getContentSummary(dir).getLength());
assertEquals(dir + " should be zero size using hftp",
0, hftpfs.getContentSummary(dir).getLength());
RemoteIterator<FileStatus> itor = fc.listStatus(dir);
assertFalse(dir + " should be empty", itor.hasNext());
itor = fs.listStatusIterator(dir);
assertFalse(dir + " should be empty", itor.hasNext());
// create another file that is smaller than a block.
Path file2 = new Path(dir, "filestatus2.dat");
writeFile(fs, file2, 1, blockSize/4, blockSize);
checkFile(fs, file2, 1);
// verify file attributes
status = fs.getFileStatus(file2);
assertEquals(blockSize, status.getBlockSize());
assertEquals(1, status.getReplication());
file2 = fs.makeQualified(file2);
assertEquals(file2.toString(), status.getPath().toString());
// Create another file in the same directory
Path file3 = new Path(dir, "filestatus3.dat");
writeFile(fs, file3, 1, blockSize/4, blockSize);
checkFile(fs, file3, 1);
file3 = fs.makeQualified(file3);
// Verify that the size of the directory increased by the size
// of the two files
final int expected = blockSize/2;
assertEquals(dir + " size should be " + expected,
expected, fs.getContentSummary(dir).getLength());
assertEquals(dir + " size should be " + expected + " using hftp",
expected, hftpfs.getContentSummary(dir).getLength());
// Test listStatus on a non-empty directory
stats = fs.listStatus(dir);
assertEquals(dir + " should have two entries", 2, stats.length);
assertEquals(file2.toString(), stats[0].getPath().toString());
assertEquals(file3.toString(), stats[1].getPath().toString());
itor = fc.listStatus(dir);
assertEquals(file2.toString(), itor.next().getPath().toString());
assertEquals(file3.toString(), itor.next().getPath().toString());
assertFalse("Unexpected addtional file", itor.hasNext());
itor = fs.listStatusIterator(dir);
assertEquals(file2.toString(), itor.next().getPath().toString());
assertEquals(file3.toString(), itor.next().getPath().toString());
assertFalse("Unexpected addtional file", itor.hasNext());
// Test iterative listing. Now dir has 2 entries, create one more.
Path dir3 = fs.makeQualified(new Path(dir, "dir3"));
fs.mkdirs(dir3);
dir3 = fs.makeQualified(dir3);
stats = fs.listStatus(dir);
assertEquals(dir + " should have three entries", 3, stats.length);
assertEquals(dir3.toString(), stats[0].getPath().toString());
assertEquals(file2.toString(), stats[1].getPath().toString());
assertEquals(file3.toString(), stats[2].getPath().toString());
itor = fc.listStatus(dir);
assertEquals(dir3.toString(), itor.next().getPath().toString());
assertEquals(file2.toString(), itor.next().getPath().toString());
assertEquals(file3.toString(), itor.next().getPath().toString());
assertFalse("Unexpected addtional file", itor.hasNext());
itor = fs.listStatusIterator(dir);
assertEquals(dir3.toString(), itor.next().getPath().toString());
assertEquals(file2.toString(), itor.next().getPath().toString());
assertEquals(file3.toString(), itor.next().getPath().toString());
assertFalse("Unexpected addtional file", itor.hasNext());
// Now dir has 3 entries, create two more
Path dir4 = fs.makeQualified(new Path(dir, "dir4"));
fs.mkdirs(dir4);
dir4 = fs.makeQualified(dir4);
Path dir5 = fs.makeQualified(new Path(dir, "dir5"));
fs.mkdirs(dir5);
dir5 = fs.makeQualified(dir5);
stats = fs.listStatus(dir);
assertEquals(dir + " should have five entries", 5, stats.length);
assertEquals(dir3.toString(), stats[0].getPath().toString());
assertEquals(dir4.toString(), stats[1].getPath().toString());
assertEquals(dir5.toString(), stats[2].getPath().toString());
assertEquals(file2.toString(), stats[3].getPath().toString());
assertEquals(file3.toString(), stats[4].getPath().toString());
itor = fc.listStatus(dir);
assertEquals(dir3.toString(), itor.next().getPath().toString());
assertEquals(dir4.toString(), itor.next().getPath().toString());
assertEquals(dir5.toString(), itor.next().getPath().toString());
assertEquals(file2.toString(), itor.next().getPath().toString());
assertEquals(file3.toString(), itor.next().getPath().toString());
assertFalse(itor.hasNext());
itor = fs.listStatusIterator(dir);
assertEquals(dir3.toString(), itor.next().getPath().toString());
assertEquals(dir4.toString(), itor.next().getPath().toString());
assertEquals(dir5.toString(), itor.next().getPath().toString());
assertEquals(file2.toString(), itor.next().getPath().toString());
assertEquals(file3.toString(), itor.next().getPath().toString());
assertFalse(itor.hasNext());
{ //test permission error on hftp
fs.setPermission(dir, new FsPermission((short)0));
try {
final String username = UserGroupInformation.getCurrentUser().getShortUserName() + "1";
final HftpFileSystem hftp2 = cluster.getHftpFileSystemAs(username, conf, 0, "somegroup");
hftp2.getContentSummary(dir);
fail();
} catch(IOException ioe) {
FileSystem.LOG.info("GOOD: getting an exception", ioe);
}
}
fs.delete(dir, true);
}
}
| 13,811 | 39.268222 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.lang.reflect.Field;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import javax.net.SocketFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.ha.IPFailoverProxyProvider;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.retry.FailoverProxyProvider;
import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.net.StandardSocketFactory;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
import org.junit.After;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import sun.net.spi.nameservice.NameService;
public class TestDFSClientFailover {
private static final Log LOG = LogFactory.getLog(TestDFSClientFailover.class);
private static final Path TEST_FILE = new Path("/tmp/failover-test-file");
private static final int FILE_LENGTH_TO_VERIFY = 100;
private final Configuration conf = new Configuration();
private MiniDFSCluster cluster;
@Before
public void setUpCluster() throws IOException {
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.build();
cluster.transitionToActive(0);
cluster.waitActive();
}
@After
public void tearDownCluster() throws IOException {
if (cluster != null) {
cluster.shutdown();
}
}
@After
public void clearConfig() {
SecurityUtil.setTokenServiceUseIp(true);
}
/**
* Make sure that client failover works when an active NN dies and the standby
* takes over.
*/
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
DFSTestUtil.createFile(fs, TEST_FILE,
FILE_LENGTH_TO_VERIFY, (short)1, 1L);
assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
// Check that it functions even if the URL becomes canonicalized
// to include a port number.
Path withPort = new Path("hdfs://" +
HATestUtil.getLogicalHostname(cluster) + ":" +
NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
FileSystem fs2 = withPort.getFileSystem(fs.getConf());
assertTrue(fs2.exists(withPort));
fs.close();
}
/**
* Test that even a non-idempotent method will properly fail-over if the
* first IPC attempt times out trying to connect. Regression test for
* HDFS-4404.
*/
@Test
public void testFailoverOnConnectTimeout() throws Exception {
conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
InjectingSocketFactory.class, SocketFactory.class);
// Set up the InjectingSocketFactory to throw a ConnectTimeoutException
// when connecting to the first NN.
InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
// Make the second NN the active one.
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
// Call a non-idempotent method, and ensure the failover of the call proceeds
// successfully.
IOUtils.closeStream(fs.create(TEST_FILE));
}
private static class InjectingSocketFactory extends StandardSocketFactory {
static final SocketFactory defaultFactory = SocketFactory.getDefault();
static int portToInjectOn;
@Override
public Socket createSocket() throws IOException {
Socket spy = Mockito.spy(defaultFactory.createSocket());
// Simplify our spying job by not having to also spy on the channel
Mockito.doReturn(null).when(spy).getChannel();
// Throw a ConnectTimeoutException when connecting to our target "bad"
// host.
Mockito.doThrow(new ConnectTimeoutException("injected"))
.when(spy).connect(
Mockito.argThat(new MatchesPort()),
Mockito.anyInt());
return spy;
}
private class MatchesPort extends BaseMatcher<SocketAddress> {
@Override
public boolean matches(Object arg0) {
return ((InetSocketAddress)arg0).getPort() == portToInjectOn;
}
@Override
public void describeTo(Description desc) {
desc.appendText("matches port " + portToInjectOn);
}
}
}
/**
* Regression test for HDFS-2683.
*/
@Test
public void testLogicalUriShouldNotHavePorts() {
Configuration config = new HdfsConfiguration(conf);
String logicalName = HATestUtil.getLogicalHostname(cluster);
HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
Path p = new Path("hdfs://" + logicalName + ":12345/");
try {
p.getFileSystem(config).exists(p);
fail("Did not fail with fake FS");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"does not use port information", ioe);
}
}
/**
* Make sure that a helpful error message is shown if a proxy provider is
* configured for a given URI, but no actual addresses are configured for that
* URI.
*/
@Test
public void testFailureWithMisconfiguredHaNNs() throws Exception {
String logicalHost = "misconfigured-ha-uri";
Configuration conf = new Configuration();
conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + logicalHost,
ConfiguredFailoverProxyProvider.class.getName());
URI uri = new URI("hdfs://" + logicalHost + "/test");
try {
FileSystem.get(uri, conf).exists(new Path("/test"));
fail("Successfully got proxy provider for misconfigured FS");
} catch (IOException ioe) {
LOG.info("got expected exception", ioe);
assertTrue("expected exception did not contain helpful message",
StringUtils.stringifyException(ioe).contains(
"Could not find any configured addresses for URI " + uri));
}
}
/**
* Spy on the Java DNS infrastructure.
* This likely only works on Sun-derived JDKs, but uses JUnit's
* Assume functionality so that any tests using it are skipped on
* incompatible JDKs.
*/
private NameService spyOnNameService() {
try {
Field f = InetAddress.class.getDeclaredField("nameServices");
f.setAccessible(true);
Assume.assumeNotNull(f);
@SuppressWarnings("unchecked")
List<NameService> nsList = (List<NameService>) f.get(null);
NameService ns = nsList.get(0);
Log log = LogFactory.getLog("NameServiceSpy");
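      // Wrap the real name service in a Mockito mock that delegates to it, so
      // tests can later verify which hostnames were (or were not) resolved.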
ns = Mockito.mock(NameService.class,
new GenericTestUtils.DelegateAnswer(log, ns));
nsList.set(0, ns);
return ns;
} catch (Throwable t) {
LOG.info("Unable to spy on DNS. Skipping test.", t);
// In case the JDK we're testing on doesn't work like Sun's, just
// skip the test.
Assume.assumeNoException(t);
throw new RuntimeException(t);
}
}
/**
* Test that the client doesn't ever try to DNS-resolve the logical URI.
* Regression test for HADOOP-9150.
*/
@Test
public void testDoesntDnsResolveLogicalURI() throws Exception {
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
NameService spyNS = spyOnNameService();
String logicalHost = fs.getUri().getHost();
Path qualifiedRoot = fs.makeQualified(new Path("/"));
// Make a few calls against the filesystem.
fs.getCanonicalServiceName();
fs.listStatus(qualifiedRoot);
// Ensure that the logical hostname was never resolved.
Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
/**
* Same test as above, but for FileContext.
*/
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
NameService spyNS = spyOnNameService();
String logicalHost = fs.getUri().getHost();
Configuration haClientConf = fs.getConf();
FileContext fc = FileContext.getFileContext(haClientConf);
Path root = new Path("/");
fc.listStatus(root);
fc.listStatus(fc.makeQualified(root));
fc.getDefaultFileSystem().getCanonicalServiceName();
// Ensure that the logical hostname was never resolved.
Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
/** Dummy implementation of plain FailoverProxyProvider */
public static class DummyLegacyFailoverProxyProvider<T>
implements FailoverProxyProvider<T> {
private Class<T> xface;
private T proxy;
public DummyLegacyFailoverProxyProvider(Configuration conf, URI uri,
Class<T> xface) {
try {
this.proxy = NameNodeProxies.createNonHAProxy(conf,
NameNode.getAddress(uri), xface,
UserGroupInformation.getCurrentUser(), false).getProxy();
this.xface = xface;
} catch (IOException ioe) {
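        // Ignore the failure; this dummy provider is only used to check how
        // legacy providers are wrapped, not to issue any RPCs.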
}
}
@Override
public Class<T> getInterface() {
return xface;
}
@Override
public ProxyInfo<T> getProxy() {
return new ProxyInfo<T>(proxy, "dummy");
}
@Override
public void performFailover(T currentProxy) {
}
@Override
public void close() throws IOException {
}
}
/**
* Test to verify legacy proxy providers are correctly wrapped.
*/
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
// setup the config with the dummy provider class
Configuration config = new HdfsConfiguration(conf);
String logicalName = HATestUtil.getLogicalHostname(cluster);
HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
config.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
DummyLegacyFailoverProxyProvider.class.getName());
Path p = new Path("hdfs://" + logicalName + "/");
// not to use IP address for token service
SecurityUtil.setTokenServiceUseIp(false);
// Logical URI should be used.
assertTrue("Legacy proxy providers should use logical URI.",
HAUtil.useLogicalUri(config, p.toUri()));
}
/**
* Test to verify IPFailoverProxyProvider is not requiring logical URI.
*/
@Test
public void testIPFailoverProxyProviderLogicalUri() throws Exception {
// setup the config with the IP failover proxy provider class
Configuration config = new HdfsConfiguration(conf);
URI nnUri = cluster.getURI(0);
config.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." +
nnUri.getHost(),
IPFailoverProxyProvider.class.getName());
assertFalse("IPFailoverProxyProvider should not use logical URI.",
HAUtil.useLogicalUri(config, nnUri));
}
}
| 12,752 | 33.844262 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
abstract public class TestBlockReaderBase {
private BlockReaderTestUtil util;
private byte[] blockData;
private BlockReader reader;
/**
   * If this is overridden, make sure the returned array length is less than
   * the block size.
*/
byte [] getBlockData() {
int length = 1 << 22;
byte[] data = new byte[length];
for (int i = 0; i < length; i++) {
data[i] = (byte) (i % 133);
}
return data;
}
private BlockReader getBlockReader(LocatedBlock block) throws Exception {
return util.getBlockReader(block, 0, blockData.length);
}
abstract HdfsConfiguration createConf();
@Before
public void setup() throws Exception {
util = new BlockReaderTestUtil(1, createConf());
blockData = getBlockData();
DistributedFileSystem fs = util.getCluster().getFileSystem();
Path testfile = new Path("/testfile");
FSDataOutputStream fout = fs.create(testfile);
fout.write(blockData);
fout.close();
LocatedBlock blk = util.getFileBlocks(testfile, blockData.length).get(0);
reader = getBlockReader(blk);
}
@After
public void shutdown() throws Exception {
util.shutdown();
}
@Test(timeout=60000)
public void testSkip() throws IOException {
Random random = new Random();
byte [] buf = new byte[1];
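    // Repeatedly skip a random number of bytes, then read one byte and verify
    // that it matches the expected block data at the new position.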
for (int pos = 0; pos < blockData.length;) {
long skip = random.nextInt(100) + 1;
long skipped = reader.skip(skip);
if (pos + skip >= blockData.length) {
assertEquals(blockData.length, pos + skipped);
break;
} else {
assertEquals(skip, skipped);
pos += skipped;
assertEquals(1, reader.read(buf, 0, 1));
assertEquals(blockData[pos], buf[0]);
pos += 1;
}
}
}
}
| 2,884 | 29.368421 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadWhileWriting.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.io.OutputStream;
import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
/** Test reading from hdfs while a file is being written. */
public class TestReadWhileWriting {
{
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
}
private static final String DIR = "/"
+ TestReadWhileWriting.class.getSimpleName() + "/";
private static final int BLOCK_SIZE = 8192;
  // The soft limit is short and the hard limit is long, so that another
  // thread can take over the lease on the file after the soft limit expires.
private static final long SOFT_LEASE_LIMIT = 500;
private static final long HARD_LEASE_LIMIT = 1000*600;
/** Test reading while writing. */
@Test
public void pipeline_02_03() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
// create cluster
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(4).build();
try {
//change the lease limits.
cluster.setLeasePeriod(SOFT_LEASE_LIMIT, HARD_LEASE_LIMIT);
//wait for the cluster
cluster.waitActive();
final FileSystem fs = cluster.getFileSystem();
final Path p = new Path(DIR, "file1");
final int half = BLOCK_SIZE/2;
//a. On Machine M1, Create file. Write half block of data.
// Invoke DFSOutputStream.hflush() on the dfs file handle.
// Do not close file yet.
{
final FSDataOutputStream out = fs.create(p, true,
fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short)3, BLOCK_SIZE);
write(out, 0, half);
//hflush
((DFSOutputStream)out.getWrappedStream()).hflush();
}
//b. On another machine M2, open file and verify that the half-block
// of data can be read successfully.
checkFile(p, half, conf);
AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
((DistributedFileSystem)fs).dfs.getLeaseRenewer().interruptAndJoin();
//c. On M1, append another half block of data. Close file on M1.
{
        //sleep to let the soft lease limit expire.
Thread.sleep(2*SOFT_LEASE_LIMIT);
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
current.getShortUserName() + "x", new String[]{"supergroup"});
final DistributedFileSystem dfs = ugi.doAs(
new PrivilegedExceptionAction<DistributedFileSystem>() {
@Override
public DistributedFileSystem run() throws Exception {
return (DistributedFileSystem)FileSystem.newInstance(conf);
}
});
final FSDataOutputStream out = append(dfs, p);
write(out, 0, half);
out.close();
}
//d. On M2, open file and read 1 block of data from it. Close file.
checkFile(p, 2*half, conf);
} finally {
cluster.shutdown();
}
}
  /** Try opening a file for append. */
private static FSDataOutputStream append(FileSystem fs, Path p) throws Exception {
for(int i = 0; i < 10; i++) {
try {
return fs.append(p);
} catch(RemoteException re) {
if (re.getClassName().equals(RecoveryInProgressException.class.getName())) {
AppendTestUtil.LOG.info("Will sleep and retry, i=" + i +", p="+p, re);
Thread.sleep(1000);
}
else
throw re;
}
}
throw new IOException("Cannot append to " + p);
}
static private int userCount = 0;
//check the file
static void checkFile(Path p, int expectedsize, final Configuration conf
) throws IOException, InterruptedException {
//open the file with another user account
final String username = UserGroupInformation.getCurrentUser().getShortUserName()
+ "_" + ++userCount;
UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username,
new String[] {"supergroup"});
final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
final HdfsDataInputStream in = (HdfsDataInputStream)fs.open(p);
//Check visible length
Assert.assertTrue(in.getVisibleLength() >= expectedsize);
//Able to read?
for(int i = 0; i < expectedsize; i++) {
Assert.assertEquals((byte)i, (byte)in.read());
}
in.close();
}
/** Write something to a file */
private static void write(OutputStream out, int offset, int length
) throws IOException {
final byte[] bytes = new byte[length];
for(int i = 0; i < length; i++) {
bytes[i] = (byte)(offset + i);
}
out.write(bytes);
}
}
| 6,374 | 35.637931 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.File;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.concurrent.ThreadLocalRandom;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
import javax.management.MBeanException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import javax.management.openmbean.CompositeDataSupport;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;
import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.io.IOUtils;
import org.junit.Assert;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNull;
/**
* This class tests rolling upgrade.
*/
public class TestRollingUpgrade {
private static final Log LOG = LogFactory.getLog(TestRollingUpgrade.class);
public static void runCmd(DFSAdmin dfsadmin, boolean success,
String... args) throws Exception {
if (success) {
assertEquals(0, dfsadmin.run(args));
} else {
Assert.assertTrue(dfsadmin.run(args) != 0);
}
}
/**
* Test DFSAdmin Upgrade Command.
*/
@Test
public void testDFSAdminRollingUpgradeCommands() throws Exception {
// start a cluster
final Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final Path foo = new Path("/foo");
final Path bar = new Path("/bar");
final Path baz = new Path("/baz");
{
final DistributedFileSystem dfs = cluster.getFileSystem();
final DFSAdmin dfsadmin = new DFSAdmin(conf);
dfs.mkdirs(foo);
//illegal argument "abc" to rollingUpgrade option
runCmd(dfsadmin, false, "-rollingUpgrade", "abc");
checkMxBeanIsNull();
//query rolling upgrade
runCmd(dfsadmin, true, "-rollingUpgrade");
//start rolling upgrade
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
runCmd(dfsadmin, true, "-rollingUpgrade", "prepare");
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
//query rolling upgrade
runCmd(dfsadmin, true, "-rollingUpgrade", "query");
checkMxBean();
dfs.mkdirs(bar);
//finalize rolling upgrade
runCmd(dfsadmin, true, "-rollingUpgrade", "finalize");
// RollingUpgradeInfo should be null after finalization, both via
// Java API and in JMX
assertNull(dfs.rollingUpgrade(RollingUpgradeAction.QUERY));
checkMxBeanIsNull();
dfs.mkdirs(baz);
runCmd(dfsadmin, true, "-rollingUpgrade");
        // All directories created before the upgrade, while the upgrade was in
        // progress, and after the upgrade was finalized should exist
Assert.assertTrue(dfs.exists(foo));
Assert.assertTrue(dfs.exists(bar));
Assert.assertTrue(dfs.exists(baz));
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
dfs.saveNamespace();
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
}
// Ensure directories exist after restart
cluster.restartNameNode();
{
final DistributedFileSystem dfs = cluster.getFileSystem();
Assert.assertTrue(dfs.exists(foo));
Assert.assertTrue(dfs.exists(bar));
Assert.assertTrue(dfs.exists(baz));
}
} finally {
if(cluster != null) cluster.shutdown();
}
}
private static Configuration setConf(Configuration conf, File dir,
MiniJournalCluster mjc) {
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dir.getAbsolutePath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
mjc.getQuorumJournalURI("myjournal").toString());
conf.setLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 0L);
return conf;
}
@Test (timeout = 30000)
public void testRollingUpgradeWithQJM() throws Exception {
String nnDirPrefix = MiniDFSCluster.getBaseDirectory() + "/nn/";
final File nn1Dir = new File(nnDirPrefix + "image1");
final File nn2Dir = new File(nnDirPrefix + "image2");
LOG.info("nn1Dir=" + nn1Dir);
LOG.info("nn2Dir=" + nn2Dir);
final Configuration conf = new HdfsConfiguration();
final MiniJournalCluster mjc = new MiniJournalCluster.Builder(conf).build();
setConf(conf, nn1Dir, mjc);
{
// Start the cluster once to generate the dfs dirs
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0)
.manageNameDfsDirs(false)
.checkExitOnShutdown(false)
.build();
// Shutdown the cluster before making a copy of the namenode dir to release
// all file locks, otherwise, the copy will fail on some platforms.
cluster.shutdown();
}
MiniDFSCluster cluster2 = null;
try {
// Start a second NN pointed to the same quorum.
// We need to copy the image dir from the first NN -- or else
// the new NN will just be rejected because of Namespace mismatch.
FileUtil.fullyDelete(nn2Dir);
FileUtil.copy(nn1Dir, FileSystem.getLocal(conf).getRaw(),
new Path(nn2Dir.getAbsolutePath()), false, conf);
// Start the cluster again
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0)
.format(false)
.manageNameDfsDirs(false)
.checkExitOnShutdown(false)
.build();
final Path foo = new Path("/foo");
final Path bar = new Path("/bar");
final Path baz = new Path("/baz");
final RollingUpgradeInfo info1;
{
final DistributedFileSystem dfs = cluster.getFileSystem();
dfs.mkdirs(foo);
//start rolling upgrade
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
info1 = dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
LOG.info("START\n" + info1);
//query rolling upgrade
assertEquals(info1, dfs.rollingUpgrade(RollingUpgradeAction.QUERY));
dfs.mkdirs(bar);
cluster.shutdown();
}
// cluster2 takes over QJM
final Configuration conf2 = setConf(new Configuration(), nn2Dir, mjc);
cluster2 = new MiniDFSCluster.Builder(conf2)
.numDataNodes(0)
.format(false)
.manageNameDfsDirs(false)
.build();
final DistributedFileSystem dfs2 = cluster2.getFileSystem();
// Check that cluster2 sees the edits made on cluster1
Assert.assertTrue(dfs2.exists(foo));
Assert.assertTrue(dfs2.exists(bar));
Assert.assertFalse(dfs2.exists(baz));
//query rolling upgrade in cluster2
assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
dfs2.mkdirs(baz);
LOG.info("RESTART cluster 2");
cluster2.restartNameNode();
assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
Assert.assertTrue(dfs2.exists(foo));
Assert.assertTrue(dfs2.exists(bar));
Assert.assertTrue(dfs2.exists(baz));
      //restarting the cluster with -upgrade should fail.
try {
cluster2.restartNameNode("-upgrade");
} catch(IOException e) {
LOG.info("The exception is expected.", e);
}
LOG.info("RESTART cluster 2 again");
cluster2.restartNameNode();
assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
Assert.assertTrue(dfs2.exists(foo));
Assert.assertTrue(dfs2.exists(bar));
Assert.assertTrue(dfs2.exists(baz));
//finalize rolling upgrade
final RollingUpgradeInfo finalize = dfs2.rollingUpgrade(
RollingUpgradeAction.FINALIZE);
Assert.assertTrue(finalize.isFinalized());
LOG.info("RESTART cluster 2 with regular startup option");
cluster2.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster2.restartNameNode();
Assert.assertTrue(dfs2.exists(foo));
Assert.assertTrue(dfs2.exists(bar));
Assert.assertTrue(dfs2.exists(baz));
} finally {
if (cluster2 != null) cluster2.shutdown();
}
}
private static CompositeDataSupport getBean()
throws MalformedObjectNameException, MBeanException,
AttributeNotFoundException, InstanceNotFoundException,
ReflectionException {
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName =
new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
return (CompositeDataSupport)mbs.getAttribute(mxbeanName,
"RollingUpgradeStatus");
}
private static void checkMxBeanIsNull() throws Exception {
CompositeDataSupport ruBean = getBean();
assertNull(ruBean);
}
private static void checkMxBean() throws Exception {
CompositeDataSupport ruBean = getBean();
    assertNotEquals(0L, ruBean.get("startTime"));
    assertEquals(0L, ruBean.get("finalizeTime"));
}
@Test
public void testRollback() throws Exception {
// start a cluster
final Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final Path foo = new Path("/foo");
final Path bar = new Path("/bar");
cluster.getFileSystem().mkdirs(foo);
final Path file = new Path(foo, "file");
final byte[] data = new byte[1024];
ThreadLocalRandom.current().nextBytes(data);
final FSDataOutputStream out = cluster.getFileSystem().create(file);
out.write(data, 0, data.length);
out.close();
checkMxBeanIsNull();
startRollingUpgrade(foo, bar, file, data, cluster);
checkMxBean();
cluster.getFileSystem().rollEdits();
cluster.getFileSystem().rollEdits();
rollbackRollingUpgrade(foo, bar, file, data, cluster);
checkMxBeanIsNull();
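      // Repeat the prepare/rollback cycle: after rolling the edit log, after
      // restarting the NameNode, and back-to-back without extra operations.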
startRollingUpgrade(foo, bar, file, data, cluster);
cluster.getFileSystem().rollEdits();
cluster.getFileSystem().rollEdits();
rollbackRollingUpgrade(foo, bar, file, data, cluster);
startRollingUpgrade(foo, bar, file, data, cluster);
cluster.restartNameNode();
rollbackRollingUpgrade(foo, bar, file, data, cluster);
startRollingUpgrade(foo, bar, file, data, cluster);
cluster.restartNameNode();
rollbackRollingUpgrade(foo, bar, file, data, cluster);
startRollingUpgrade(foo, bar, file, data, cluster);
rollbackRollingUpgrade(foo, bar, file, data, cluster);
startRollingUpgrade(foo, bar, file, data, cluster);
rollbackRollingUpgrade(foo, bar, file, data, cluster);
} finally {
if(cluster != null) cluster.shutdown();
}
}
private static void startRollingUpgrade(Path foo, Path bar,
Path file, byte[] data,
MiniDFSCluster cluster) throws IOException {
final DistributedFileSystem dfs = cluster.getFileSystem();
//start rolling upgrade
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
dfs.mkdirs(bar);
Assert.assertTrue(dfs.exists(foo));
Assert.assertTrue(dfs.exists(bar));
//truncate a file
final int newLength = ThreadLocalRandom.current().nextInt(data.length - 1)
+ 1;
dfs.truncate(file, newLength);
TestFileTruncate.checkBlockRecovery(file, dfs);
AppendTestUtil.checkFullFile(dfs, file, newLength, data);
}
private static void rollbackRollingUpgrade(Path foo, Path bar,
Path file, byte[] data,
MiniDFSCluster cluster) throws IOException {
final DataNodeProperties dnprop = cluster.stopDataNode(0);
cluster.restartNameNode("-rollingUpgrade", "rollback");
cluster.restartDataNode(dnprop, true);
final DistributedFileSystem dfs = cluster.getFileSystem();
Assert.assertTrue(dfs.exists(foo));
Assert.assertFalse(dfs.exists(bar));
AppendTestUtil.checkFullFile(dfs, file, data.length, data);
}
@Test
public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {
// start a cluster
final Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DFSAdmin dfsadmin = new DFSAdmin(conf);
DataNode dn = cluster.getDataNodes().get(0);
// check the datanode
final String dnAddr = dn.getDatanodeId().getIpcAddr(false);
final String[] args1 = {"-getDatanodeInfo", dnAddr};
runCmd(dfsadmin, true, args1);
// issue shutdown to the datanode.
final String[] args2 = {"-shutdownDatanode", dnAddr, "upgrade" };
runCmd(dfsadmin, true, args2);
// the datanode should be down.
Thread.sleep(2000);
Assert.assertFalse("DataNode should exit", dn.isDatanodeUp());
// ping should fail.
assertEquals(-1, dfsadmin.run(args1));
} finally {
if (cluster != null) cluster.shutdown();
}
}
@Test (timeout = 300000)
public void testFinalize() throws Exception {
final Configuration conf = new HdfsConfiguration();
MiniQJMHACluster cluster = null;
final Path foo = new Path("/foo");
final Path bar = new Path("/bar");
try {
cluster = new MiniQJMHACluster.Builder(conf).build();
MiniDFSCluster dfsCluster = cluster.getDfsCluster();
dfsCluster.waitActive();
// let NN1 tail editlog every 1s
dfsCluster.getConfiguration(1).setInt(
DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
dfsCluster.restartNameNode(1);
dfsCluster.transitionToActive(0);
DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
dfs.mkdirs(foo);
FSImage fsimage = dfsCluster.getNamesystem(0).getFSImage();
// start rolling upgrade
RollingUpgradeInfo info = dfs
.rollingUpgrade(RollingUpgradeAction.PREPARE);
Assert.assertTrue(info.isStarted());
dfs.mkdirs(bar);
queryForPreparation(dfs);
// The NN should have a copy of the fsimage in case of rollbacks.
Assert.assertTrue(fsimage.hasRollbackFSImage());
info = dfs.rollingUpgrade(RollingUpgradeAction.FINALIZE);
Assert.assertTrue(info.isFinalized());
Assert.assertTrue(dfs.exists(foo));
// Once finalized, there should be no more fsimage for rollbacks.
Assert.assertFalse(fsimage.hasRollbackFSImage());
      // Should have no problem restarting and replaying edits that include
      // the FINALIZE op.
dfsCluster.restartNameNode(0);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test (timeout = 300000)
public void testQuery() throws Exception {
final Configuration conf = new Configuration();
MiniQJMHACluster cluster = null;
try {
cluster = new MiniQJMHACluster.Builder(conf).build();
MiniDFSCluster dfsCluster = cluster.getDfsCluster();
dfsCluster.waitActive();
dfsCluster.transitionToActive(0);
DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
dfsCluster.shutdownNameNode(1);
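      // With the standby NN down, the rollback image cannot be created yet.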
// start rolling upgrade
RollingUpgradeInfo info = dfs
.rollingUpgrade(RollingUpgradeAction.PREPARE);
Assert.assertTrue(info.isStarted());
info = dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
Assert.assertFalse(info.createdRollbackImages());
dfsCluster.restartNameNode(1);
queryForPreparation(dfs);
// The NN should have a copy of the fsimage in case of rollbacks.
Assert.assertTrue(dfsCluster.getNamesystem(0).getFSImage()
.hasRollbackFSImage());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test (timeout = 300000)
public void testQueryAfterRestart() throws IOException, InterruptedException {
final Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
DistributedFileSystem dfs = cluster.getFileSystem();
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
// start rolling upgrade
dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
queryForPreparation(dfs);
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
dfs.saveNamespace();
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.restartNameNodes();
dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test(timeout = 300000)
public void testCheckpoint() throws IOException, InterruptedException {
final Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
MiniQJMHACluster cluster = null;
final Path foo = new Path("/foo");
try {
cluster = new MiniQJMHACluster.Builder(conf).build();
MiniDFSCluster dfsCluster = cluster.getDfsCluster();
dfsCluster.waitActive();
dfsCluster.transitionToActive(0);
DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
// start rolling upgrade
RollingUpgradeInfo info = dfs
.rollingUpgrade(RollingUpgradeAction.PREPARE);
Assert.assertTrue(info.isStarted());
queryForPreparation(dfs);
dfs.mkdirs(foo);
long txid = dfs.rollEdits();
Assert.assertTrue(txid > 0);
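      // Wait for the standby NN to create a checkpoint covering the rolled
      // edits (the checkpoint period is set to 1s above).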
int retries = 0;
while (++retries < 5) {
NNStorage storage = dfsCluster.getNamesystem(1).getFSImage()
.getStorage();
if (storage.getFsImageName(txid - 1) != null) {
return;
}
Thread.sleep(1000);
}
Assert.fail("new checkpoint does not exist");
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
static void queryForPreparation(DistributedFileSystem dfs) throws IOException,
InterruptedException {
RollingUpgradeInfo info;
int retries = 0;
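    // Poll the QUERY action for up to ~10 seconds until the NN reports that
    // the rollback fsimage has been created.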
while (++retries < 10) {
info = dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
if (info.createdRollbackImages()) {
break;
}
Thread.sleep(1000);
}
if (retries >= 10) {
Assert.fail("Query return false");
}
}
/**
   * In a non-HA setup, after a rolling upgrade PREPARE, the Secondary NN should
   * still be able to do a checkpoint.
*/
@Test
public void testCheckpointWithSNN() throws Exception {
MiniDFSCluster cluster = null;
DistributedFileSystem dfs = null;
SecondaryNameNode snn = null;
try {
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
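      // Configure an ephemeral HTTP address for the SecondaryNameNode before
      // constructing it.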
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
"0.0.0.0:0");
snn = new SecondaryNameNode(conf);
dfs = cluster.getFileSystem();
dfs.mkdirs(new Path("/test/foo"));
snn.doCheckpoint();
//start rolling upgrade
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
dfs.mkdirs(new Path("/test/bar"));
// do checkpoint in SNN again
snn.doCheckpoint();
} finally {
IOUtils.cleanup(null, dfs);
if (snn != null) {
snn.shutdown();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 21,546 | 32.772727 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.io.IOUtils;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
/**
 * This tests that the client protocol used during pipeline recovery works
 * correctly.
*/
public class TestClientProtocolForPipelineRecovery {
@Test public void testGetNewStamp() throws IOException {
int numDataNodes = 1;
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
try {
cluster.waitActive();
FileSystem fileSys = cluster.getFileSystem();
NamenodeProtocols namenode = cluster.getNameNodeRpc();
/* Test writing to finalized replicas */
Path file = new Path("dataprotocol.dat");
DFSTestUtil.createFile(fileSys, file, 1L, (short)numDataNodes, 0L);
// get the first blockid for the file
ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
// test getNewStampAndToken on a finalized block
try {
namenode.updateBlockForPipeline(firstBlock, "");
Assert.fail("Can not get a new GS from a finalized block");
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains("is not under Construction"));
}
// test getNewStampAndToken on a non-existent block
try {
long newBlockId = firstBlock.getBlockId() + 1;
ExtendedBlock newBlock = new ExtendedBlock(firstBlock.getBlockPoolId(),
newBlockId, 0, firstBlock.getGenerationStamp());
namenode.updateBlockForPipeline(newBlock, "");
Assert.fail("Cannot get a new GS from a non-existent block");
} catch (IOException e) {
Assert.assertTrue(e.getMessage().contains("does not exist"));
}
/* Test RBW replicas */
// change first block to a RBW
DFSOutputStream out = null;
try {
out = (DFSOutputStream)(fileSys.append(file).
getWrappedStream());
out.write(1);
out.hflush();
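        // After the append and hflush the last block is back in RBW
        // (replica being written) state.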
FSDataInputStream in = null;
try {
in = fileSys.open(file);
firstBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
} finally {
IOUtils.closeStream(in);
}
// test non-lease holder
DFSClient dfs = ((DistributedFileSystem)fileSys).dfs;
try {
namenode.updateBlockForPipeline(firstBlock, "test" + dfs.clientName);
Assert.fail("Cannot get a new GS for a non lease holder");
} catch (LeaseExpiredException e) {
Assert.assertTrue(e.getMessage().startsWith("Lease mismatch"));
}
// test null lease holder
try {
namenode.updateBlockForPipeline(firstBlock, null);
Assert.fail("Cannot get a new GS for a null lease holder");
} catch (LeaseExpiredException e) {
Assert.assertTrue(e.getMessage().startsWith("Lease mismatch"));
}
// test getNewStampAndToken on a rbw block
namenode.updateBlockForPipeline(firstBlock, dfs.clientName);
} finally {
IOUtils.closeStream(out);
}
} finally {
cluster.shutdown();
}
}
/** Test whether corrupt replicas are detected correctly during pipeline
* recoveries.
*/
@Test
public void testPipelineRecoveryForLastBlock() throws IOException {
DFSClientFaultInjector faultInjector
= Mockito.mock(DFSClientFaultInjector.class);
DFSClientFaultInjector oldInjector = DFSClientFaultInjector.instance;
DFSClientFaultInjector.instance = faultInjector;
Configuration conf = new HdfsConfiguration();
conf.setInt(HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY, 3);
MiniDFSCluster cluster = null;
try {
int numDataNodes = 3;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
FileSystem fileSys = cluster.getFileSystem();
Path file = new Path("dataprotocol1.dat");
Mockito.when(faultInjector.failPacket()).thenReturn(true);
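      // Have the client-side fault injector simulate a dropped packet so that
      // pipeline recovery runs on the last block of the file.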
DFSTestUtil.createFile(fileSys, file, 68000000L, (short)numDataNodes, 0L);
// At this point, NN should have accepted only valid replicas.
// Read should succeed.
FSDataInputStream in = fileSys.open(file);
try {
in.read();
// Test will fail with BlockMissingException if NN does not update the
// replica state based on the latest report.
} catch (org.apache.hadoop.hdfs.BlockMissingException bme) {
Assert.fail("Block is missing because the file was closed with"
+ " corrupt replicas.");
}
} finally {
DFSClientFaultInjector.instance = oldInjector;
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
   * Test pipeline recovery on a datanode-restart OOB message. It also tests
   * the delivery of the OOB ack originating from the primary datanode. Since
   * there is only one node in the cluster, a failure of restart recovery will
   * fail the test.
*/
@Test
public void testPipelineRecoveryOnOOB() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "15");
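    // Allow the client to wait up to 15 seconds for a restarting datanode.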
MiniDFSCluster cluster = null;
try {
int numDataNodes = 1;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
FileSystem fileSys = cluster.getFileSystem();
Path file = new Path("dataprotocol2.dat");
DFSTestUtil.createFile(fileSys, file, 10240L, (short)1, 0L);
DFSOutputStream out = (DFSOutputStream)(fileSys.append(file).
getWrappedStream());
out.write(1);
out.hflush();
DFSAdmin dfsadmin = new DFSAdmin(conf);
DataNode dn = cluster.getDataNodes().get(0);
final String dnAddr = dn.getDatanodeId().getIpcAddr(false);
// issue shutdown to the datanode.
final String[] args1 = {"-shutdownDatanode", dnAddr, "upgrade" };
Assert.assertEquals(0, dfsadmin.run(args1));
// Wait long enough to receive an OOB ack before closing the file.
Thread.sleep(4000);
      // Restart the datanode
cluster.restartDataNode(0, true);
      // The following forces a data packet and an end-of-block packet to be sent.
out.close();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/** Test restart timeout */
@Test
public void testPipelineRecoveryOnRestartFailure() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "5");
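    // Use a short 5 second restart timeout so the wait for a restarting
    // datanode expires quickly.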
MiniDFSCluster cluster = null;
try {
int numDataNodes = 2;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
FileSystem fileSys = cluster.getFileSystem();
Path file = new Path("dataprotocol3.dat");
DFSTestUtil.createFile(fileSys, file, 10240L, (short)2, 0L);
DFSOutputStream out = (DFSOutputStream)(fileSys.append(file).
getWrappedStream());
out.write(1);
out.hflush();
DFSAdmin dfsadmin = new DFSAdmin(conf);
DataNode dn = cluster.getDataNodes().get(0);
final String dnAddr1 = dn.getDatanodeId().getIpcAddr(false);
// issue shutdown to the datanode.
final String[] args1 = {"-shutdownDatanode", dnAddr1, "upgrade" };
Assert.assertEquals(0, dfsadmin.run(args1));
Thread.sleep(4000);
// This should succeed without restarting the node. The restart will
// expire and regular pipeline recovery will kick in.
out.close();
// At this point there is only one node in the cluster.
out = (DFSOutputStream)(fileSys.append(file).
getWrappedStream());
out.write(1);
out.hflush();
dn = cluster.getDataNodes().get(1);
final String dnAddr2 = dn.getDatanodeId().getIpcAddr(false);
// issue shutdown to the datanode.
final String[] args2 = {"-shutdownDatanode", dnAddr2, "upgrade" };
Assert.assertEquals(0, dfsadmin.run(args2));
Thread.sleep(4000);
try {
// close should fail
out.close();
assert false;
} catch (IOException ioe) { }
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 9,713 | 36.361538 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
import static org.apache.hadoop.test.GenericTestUtils.assertExists;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.nio.file.Files;
import java.util.List;
import java.util.regex.Pattern;
import com.google.common.base.Preconditions;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.StringUtils;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import static org.apache.hadoop.hdfs.inotify.Event.CreateEvent;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
/**
* This test ensures the appropriate response (successful or failure) from
* the system when the system is upgraded under various storage state and
* version conditions.
*/
public class TestDFSUpgrade {
// TODO: Avoid hard-coding expected_txid. The test should be more robust.
private static final int EXPECTED_TXID = 61;
private static final Log LOG = LogFactory.getLog(TestDFSUpgrade.class.getName());
private Configuration conf;
private int testCounter = 0;
private MiniDFSCluster cluster = null;
/**
* Writes an INFO log message containing the parameters.
*/
void log(String label, int numDirs) {
LOG.info("============================================================");
LOG.info("***TEST " + (testCounter++) + "*** "
+ label + ":"
+ " numDirs="+numDirs);
}
/**
* For namenode, Verify that the current and previous directories exist.
* Verify that previous hasn't been modified by comparing the checksum of all
* its files with their original checksum. It is assumed that the
* server has recovered and upgraded.
*/
void checkNameNode(String[] baseDirs, long imageTxId) throws IOException {
for (String baseDir : baseDirs) {
LOG.info("Checking namenode directory " + baseDir);
LOG.info("==== Contents ====:\n " +
Joiner.on(" \n").join(new File(baseDir, "current").list()));
LOG.info("==================");
assertExists(new File(baseDir,"current"));
assertExists(new File(baseDir,"current/VERSION"));
assertExists(new File(baseDir,"current/"
+ getInProgressEditsFileName(imageTxId + 1)));
assertExists(new File(baseDir,"current/"
+ getImageFileName(imageTxId)));
assertExists(new File(baseDir,"current/seen_txid"));
File previous = new File(baseDir, "previous");
assertExists(previous);
assertEquals(UpgradeUtilities.checksumContents(NAME_NODE, previous, false),
UpgradeUtilities.checksumMasterNameNodeContents());
}
}
/**
* For datanode, for a block pool, verify that the current and previous
* directories exist. Verify that previous hasn't been modified by comparing
* the checksum of all its files with their original checksum. It
* is assumed that the server has recovered and upgraded.
*/
void checkDataNode(String[] baseDirs, String bpid) throws IOException {
for (int i = 0; i < baseDirs.length; i++) {
File current = new File(baseDirs[i], "current/" + bpid + "/current");
assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, current, false),
UpgradeUtilities.checksumMasterDataNodeContents());
// block files are placed under <sd>/current/<bpid>/current/finalized
File currentFinalized =
MiniDFSCluster.getFinalizedDir(new File(baseDirs[i]), bpid);
assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
currentFinalized, true),
UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
File previous = new File(baseDirs[i], "current/" + bpid + "/previous");
assertTrue(previous.isDirectory());
assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previous, false),
UpgradeUtilities.checksumMasterDataNodeContents());
File previousFinalized =
new File(baseDirs[i], "current/" + bpid + "/previous"+"/finalized");
assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
previousFinalized, true),
UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
}
}
/**
* Attempts to start a NameNode with the given operation. Starting
* the NameNode should throw an exception.
*/
void startNameNodeShouldFail(StartupOption operation) {
startNameNodeShouldFail(operation, null, null);
}
/**
* Attempts to start a NameNode with the given operation. Starting
* the NameNode should throw an exception.
* @param operation - NameNode startup operation
* @param exceptionClass - if non-null, will check that the caught exception
* is assignment-compatible with exceptionClass
* @param messagePattern - if non-null, will check that a substring of the
* message from the caught exception matches this pattern, via the
* {@link Matcher#find()} method.
*/
void startNameNodeShouldFail(StartupOption operation,
Class<? extends Exception> exceptionClass, Pattern messagePattern) {
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.startupOption(operation)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.build(); // should fail
fail("NameNode should have failed to start");
} catch (Exception e) {
// expect exception
if (exceptionClass != null) {
assertTrue("Caught exception is not of expected class "
+ exceptionClass.getSimpleName() + ": "
+ StringUtils.stringifyException(e),
exceptionClass.isInstance(e));
}
if (messagePattern != null) {
assertTrue("Caught exception message string does not match expected pattern \""
+ messagePattern.pattern() + "\" : "
+ StringUtils.stringifyException(e),
messagePattern.matcher(e.getMessage()).find());
}
LOG.info("Successfully detected expected NameNode startup failure.");
}
}
/**
* Attempts to start a DataNode with the given operation. Starting
* the given block pool should fail.
* @param operation startup option
* @param bpid block pool Id that should fail to start
* @throws IOException
*/
void startBlockPoolShouldFail(StartupOption operation, String bpid) throws IOException {
cluster.startDataNodes(conf, 1, false, operation, null); // should fail
assertFalse("Block pool " + bpid + " should have failed to start",
cluster.getDataNodes().get(0).isBPServiceAlive(bpid));
}
/**
* Create an instance of a newly configured cluster for testing that does
* not manage its own directories or files
*/
private MiniDFSCluster createCluster() throws IOException {
return new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.startupOption(StartupOption.UPGRADE)
.build();
}
@BeforeClass
public static void initialize() throws Exception {
UpgradeUtilities.initialize();
}
/**
* This test attempts to upgrade the NameNode and DataNode under
* a number of valid and invalid conditions.
*/
@Test(timeout = 60000)
public void testUpgrade() throws Exception {
File[] baseDirs;
StorageInfo storageInfo = null;
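    // Run each upgrade scenario with one and then two configured storage
    // directories.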
for (int numDirs = 1; numDirs <= 2; numDirs++) {
conf = new HdfsConfiguration();
conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
log("Normal NameNode upgrade", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
cluster = createCluster();
// make sure that rolling upgrade cannot be started
try {
final DistributedFileSystem dfs = cluster.getFileSystem();
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
fail();
} catch(RemoteException re) {
assertEquals(InconsistentFSStateException.class.getName(),
re.getClassName());
LOG.info("The exception is expected.", re);
}
checkNameNode(nameNodeDirs, EXPECTED_TXID);
if (numDirs > 1)
TestParallelImageWrite.checkImages(cluster.getNamesystem(), numDirs);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("Normal DataNode upgrade", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
cluster = createCluster();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("NameNode upgrade with existing previous dir", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("DataNode upgrade with existing previous dir", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
cluster = createCluster();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("DataNode upgrade with future stored layout version in current", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
cluster = createCluster();
baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
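      // HDFS layout versions are negative and decrease over time, so
      // Integer.MIN_VALUE below represents a layout version from the future.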
storageInfo = new StorageInfo(Integer.MIN_VALUE,
UpgradeUtilities.getCurrentNamespaceID(cluster),
UpgradeUtilities.getCurrentClusterID(cluster),
UpgradeUtilities.getCurrentFsscTime(cluster), NodeType.DATA_NODE);
UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
UpgradeUtilities.getCurrentBlockPoolID(cluster));
startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities
.getCurrentBlockPoolID(null));
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("DataNode upgrade with newer fsscTime in current", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
cluster = createCluster();
baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
storageInfo = new StorageInfo(HdfsServerConstants.DATANODE_LAYOUT_VERSION,
UpgradeUtilities.getCurrentNamespaceID(cluster),
UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE,
NodeType.DATA_NODE);
UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
UpgradeUtilities.getCurrentBlockPoolID(cluster));
      // Ensure the corresponding block pool failed to initialize
startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities
.getCurrentBlockPoolID(null));
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("NameNode upgrade with no edits file", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
deleteStorageFilesWithPrefix(nameNodeDirs, "edits_");
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode upgrade with no image file", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
deleteStorageFilesWithPrefix(nameNodeDirs, "fsimage_");
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode upgrade with corrupt version file", numDirs);
baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
for (File f : baseDirs) {
UpgradeUtilities.corruptFile(
new File(f,"VERSION"),
"layoutVersion".getBytes(Charsets.UTF_8),
"xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
}
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode upgrade with old layout version in current", numDirs);
baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
storageInfo = new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1,
UpgradeUtilities.getCurrentNamespaceID(null),
UpgradeUtilities.getCurrentClusterID(null),
UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);
UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,
UpgradeUtilities.getCurrentBlockPoolID(cluster));
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode upgrade with future layout version in current", numDirs);
baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
storageInfo = new StorageInfo(Integer.MIN_VALUE,
UpgradeUtilities.getCurrentNamespaceID(null),
UpgradeUtilities.getCurrentClusterID(null),
UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);
UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,
UpgradeUtilities.getCurrentBlockPoolID(cluster));
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
} // end numDir loop
// One more check: normal NN upgrade with 4 directories, concurrent write
int numDirs = 4;
{
conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
log("Normal NameNode upgrade", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
cluster = createCluster();
// make sure that rolling upgrade cannot be started
try {
final DistributedFileSystem dfs = cluster.getFileSystem();
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
fail();
} catch(RemoteException re) {
assertEquals(InconsistentFSStateException.class.getName(),
re.getClassName());
LOG.info("The exception is expected.", re);
}
checkNameNode(nameNodeDirs, EXPECTED_TXID);
TestParallelImageWrite.checkImages(cluster.getNamesystem(), numDirs);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
}
/*
* Stand-alone test to detect failure of one SD during parallel upgrade.
   * At this time, this can only be done with a manual hack of {@link FSImage#doUpgrade()}.
*/
@Ignore
public void testUpgrade4() throws Exception {
int numDirs = 4;
conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
log("NameNode upgrade with one bad storage dir", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
try {
// assert("storage dir has been prepared for failure before reaching this point");
startNameNodeShouldFail(StartupOption.UPGRADE, IOException.class,
Pattern.compile("failed in 1 storage"));
} finally {
// assert("storage dir shall be returned to normal state before exiting");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
}
private void deleteStorageFilesWithPrefix(String[] nameNodeDirs, String prefix)
throws Exception {
for (String baseDirStr : nameNodeDirs) {
File baseDir = new File(baseDirStr);
File currentDir = new File(baseDir, "current");
for (File f : currentDir.listFiles()) {
if (f.getName().startsWith(prefix)) {
assertTrue("Deleting " + f, f.delete());
}
}
}
}
@Test(expected=IOException.class)
public void testUpgradeFromPreUpgradeLVFails() throws IOException {
// Upgrade from versions prior to Storage#LAST_UPGRADABLE_LAYOUT_VERSION
// is not allowed
Storage.checkVersionUpgradable(Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION + 1);
fail("Expected IOException is not thrown");
}
@Ignore
public void test203LayoutVersion() {
for (int lv : Storage.LAYOUT_VERSIONS_203) {
assertTrue(Storage.is203LayoutVersion(lv));
}
}
@Test
public void testPreserveEditLogs() throws Exception {
conf = new HdfsConfiguration();
conf = UpgradeUtilities.initializeStorageStateConf(1, conf);
String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
log("Normal NameNode upgrade", 1);
File[] created =
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
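    // Rewrite every edits file under the previous layout version
    // (CURRENT_LAYOUT_VERSION + 1, i.e. one version older) so the subsequent
    // upgrade must read and preserve those transactions.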
for (final File createdDir : created) {
List<String> fileNameList =
IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
for (String fileName : fileNameList) {
String tmpFileName = fileName + ".tmp";
File existingFile = new File(createdDir, fileName);
File tmpFile = new File(createdDir, tmpFileName);
Files.move(existingFile.toPath(), tmpFile.toPath());
File newFile = new File(createdDir, fileName);
Preconditions.checkState(newFile.createNewFile(),
"Cannot create new edits log file in " + createdDir);
EditLogFileInputStream in = new EditLogFileInputStream(tmpFile,
HdfsServerConstants.INVALID_TXID, HdfsServerConstants.INVALID_TXID,
false);
EditLogFileOutputStream out = new EditLogFileOutputStream(conf, newFile,
(int)tmpFile.length());
out.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION + 1);
FSEditLogOp logOp = in.readOp();
while (logOp != null) {
out.write(logOp);
logOp = in.readOp();
}
out.setReadyToFlush();
out.flushAndSync(true);
out.close();
Files.delete(tmpFile.toPath());
}
}
cluster = createCluster();
DFSInotifyEventInputStream ieis =
cluster.getFileSystem().getInotifyEventStream(0);
EventBatch batch = ieis.poll();
Event[] events = batch.getEvents();
assertTrue("Should be able to get transactions before the upgrade.",
events.length > 0);
    assertEquals(Event.EventType.CREATE, events[0].getEventType());
    assertEquals("/TestUpgrade", ((CreateEvent) events[0]).getPath());
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
private enum EditLogsFilter implements FilenameFilter {
INSTANCE;
@Override
public boolean accept(File dir, String name) {
return name.startsWith(NNStorage.NameNodeFile.EDITS.getName());
}
}
public static void main(String[] args) throws Exception {
TestDFSUpgrade t = new TestDFSUpgrade();
TestDFSUpgrade.initialize();
t.testUpgrade();
}
}
| 23,472 | 42.630112 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestLeaseRecovery2 {
public static final Log LOG = LogFactory.getLog(TestLeaseRecovery2.class);
{
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
}
static final private long BLOCK_SIZE = 1024;
static final private int FILE_SIZE = (int)BLOCK_SIZE*2;
static final short REPLICATION_NUM = (short)3;
static final byte[] buffer = new byte[FILE_SIZE];
static private final String fakeUsername = "fakeUser1";
static private final String fakeGroup = "supergroup";
static private MiniDFSCluster cluster;
static private DistributedFileSystem dfs;
final static private Configuration conf = new HdfsConfiguration();
final static private int BUF_SIZE = conf.getInt(
CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
final static private long SHORT_LEASE_PERIOD = 1000L;
final static private long LONG_LEASE_PERIOD = 60*60*SHORT_LEASE_PERIOD;
/** start a dfs cluster
*
* @throws IOException
*/
@BeforeClass
public static void startUp() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
cluster.waitActive();
dfs = cluster.getFileSystem();
}
/**
* stop the cluster
* @throws IOException
*/
@AfterClass
public static void tearDown() throws IOException {
IOUtils.closeStream(dfs);
if (cluster != null) {cluster.shutdown();}
}
/**
   * Test the NameNode's ability to revoke the lease held by the current lease holder.
* @throws Exception
*/
@Test
public void testImmediateRecoveryOfLease() throws Exception {
//create a file
// write bytes into the file.
byte [] actual = new byte[FILE_SIZE];
int size = AppendTestUtil.nextInt(FILE_SIZE);
Path filepath = createFile("/immediateRecoverLease-shortlease", size, true);
// set the soft limit to be 1 second so that the
// namenode triggers lease recovery on next attempt to write-for-open.
cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD);
recoverLeaseUsingCreate(filepath);
verifyFile(dfs, filepath, actual, size);
//test recoverLease
// set the soft limit to be 1 hour but recoverLease should
// close the file immediately
cluster.setLeasePeriod(LONG_LEASE_PERIOD, LONG_LEASE_PERIOD);
size = AppendTestUtil.nextInt(FILE_SIZE);
filepath = createFile("/immediateRecoverLease-longlease", size, false);
// test recoverLease from a different client
recoverLease(filepath, null);
verifyFile(dfs, filepath, actual, size);
    // test recoverLease from the same client
size = AppendTestUtil.nextInt(FILE_SIZE);
filepath = createFile("/immediateRecoverLease-sameclient", size, false);
// create another file using the same client
Path filepath1 = new Path(filepath.toString() + AppendTestUtil.nextInt());
FSDataOutputStream stm = dfs.create(filepath1, true, BUF_SIZE,
REPLICATION_NUM, BLOCK_SIZE);
// recover the first file
recoverLease(filepath, dfs);
verifyFile(dfs, filepath, actual, size);
// continue to write to the second file
stm.write(buffer, 0, size);
stm.close();
verifyFile(dfs, filepath1, actual, size);
}
@Test
public void testLeaseRecoverByAnotherUser() throws Exception {
byte [] actual = new byte[FILE_SIZE];
cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD);
Path filepath = createFile("/immediateRecoverLease-x", 0, true);
recoverLeaseUsingCreate2(filepath);
verifyFile(dfs, filepath, actual, 0);
}
private Path createFile(final String filestr, final int size,
final boolean triggerLeaseRenewerInterrupt)
throws IOException, InterruptedException {
AppendTestUtil.LOG.info("filestr=" + filestr);
Path filepath = new Path(filestr);
FSDataOutputStream stm = dfs.create(filepath, true, BUF_SIZE,
REPLICATION_NUM, BLOCK_SIZE);
assertTrue(dfs.dfs.exists(filestr));
AppendTestUtil.LOG.info("size=" + size);
stm.write(buffer, 0, size);
// hflush file
AppendTestUtil.LOG.info("hflush");
stm.hflush();
if (triggerLeaseRenewerInterrupt) {
AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
dfs.dfs.getLeaseRenewer().interruptAndJoin();
}
return filepath;
}
private void recoverLease(Path filepath, DistributedFileSystem dfs)
throws Exception {
if (dfs == null) {
dfs = (DistributedFileSystem)getFSAsAnotherUser(conf);
}
while (!dfs.recoverLease(filepath)) {
AppendTestUtil.LOG.info("sleep " + 5000 + "ms");
Thread.sleep(5000);
}
}
private FileSystem getFSAsAnotherUser(final Configuration c)
throws IOException, InterruptedException {
return FileSystem.get(FileSystem.getDefaultUri(c), c,
UserGroupInformation.createUserForTesting(fakeUsername,
new String [] {fakeGroup}).getUserName());
}
private void recoverLeaseUsingCreate(Path filepath)
throws IOException, InterruptedException {
FileSystem dfs2 = getFSAsAnotherUser(conf);
for(int i = 0; i < 10; i++) {
AppendTestUtil.LOG.info("i=" + i);
try {
dfs2.create(filepath, false, BUF_SIZE, (short)1, BLOCK_SIZE);
fail("Creation of an existing file should never succeed.");
} catch(FileAlreadyExistsException e) {
return; // expected
} catch(AlreadyBeingCreatedException e) {
return; // expected
} catch(IOException ioe) {
AppendTestUtil.LOG.warn("UNEXPECTED ", ioe);
AppendTestUtil.LOG.info("sleep " + 5000 + "ms");
try {Thread.sleep(5000);} catch (InterruptedException e) {}
}
}
fail("recoverLeaseUsingCreate failed");
}
private void recoverLeaseUsingCreate2(Path filepath)
throws Exception {
FileSystem dfs2 = getFSAsAnotherUser(conf);
int size = AppendTestUtil.nextInt(FILE_SIZE);
DistributedFileSystem dfsx = (DistributedFileSystem) dfs2;
//create file using dfsx
Path filepath2 = new Path("/immediateRecoverLease-x2");
FSDataOutputStream stm = dfsx.create(filepath2, true, BUF_SIZE,
REPLICATION_NUM, BLOCK_SIZE);
assertTrue(dfsx.dfs.exists("/immediateRecoverLease-x2"));
try {Thread.sleep(10000);} catch (InterruptedException e) {}
dfsx.append(filepath);
}
private void verifyFile(FileSystem dfs, Path filepath, byte[] actual,
int size) throws IOException {
AppendTestUtil.LOG.info("Lease for file " + filepath + " is recovered. "
+ "Validating its contents now...");
// verify that file-size matches
assertTrue("File should be " + size + " bytes, but is actually " +
" found to be " + dfs.getFileStatus(filepath).getLen() +
" bytes",
dfs.getFileStatus(filepath).getLen() == size);
// verify that there is enough data to read.
System.out.println("File size is good. Now validating sizes from datanodes...");
FSDataInputStream stmin = dfs.open(filepath);
stmin.readFully(0, actual, 0, size);
stmin.close();
}
/**
   * This test makes it so the client does not renew its lease and also sets
   * the hard lease expiration period to a short 1s, thus triggering
   * lease expiration while the client is still alive.
*
* The test makes sure that the lease recovery completes and the client
* fails if it continues to write to the file.
*
* @throws Exception
*/
@Test
public void testHardLeaseRecovery() throws Exception {
//create a file
String filestr = "/hardLeaseRecovery";
AppendTestUtil.LOG.info("filestr=" + filestr);
Path filepath = new Path(filestr);
FSDataOutputStream stm = dfs.create(filepath, true,
BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE);
assertTrue(dfs.dfs.exists(filestr));
// write bytes into the file.
int size = AppendTestUtil.nextInt(FILE_SIZE);
AppendTestUtil.LOG.info("size=" + size);
stm.write(buffer, 0, size);
// hflush file
AppendTestUtil.LOG.info("hflush");
stm.hflush();
// kill the lease renewal thread
AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
dfs.dfs.getLeaseRenewer().interruptAndJoin();
// set the hard limit to be 1 second
cluster.setLeasePeriod(LONG_LEASE_PERIOD, SHORT_LEASE_PERIOD);
// wait for lease recovery to complete
LocatedBlocks locatedBlocks;
do {
Thread.sleep(SHORT_LEASE_PERIOD);
locatedBlocks = dfs.dfs.getLocatedBlocks(filestr, 0L, size);
} while (locatedBlocks.isUnderConstruction());
assertEquals(size, locatedBlocks.getFileLength());
// make sure that the writer thread gets killed
try {
stm.write('b');
stm.close();
fail("Writer thread should have been killed");
} catch (IOException e) {
e.printStackTrace();
}
// verify data
AppendTestUtil.LOG.info(
"File size is good. Now validating sizes from datanodes...");
AppendTestUtil.checkFullFile(dfs, filepath, size, buffer, filestr);
}
/**
   * This test makes it so the client does not renew its lease and also sets
   * the soft lease expiration period to a short 1s, thus triggering
   * soft lease expiration immediately when another client tries to create
   * the same file.
*
* The test makes sure that the lease recovery completes.
*
* @throws Exception
*/
@Test
public void testSoftLeaseRecovery() throws Exception {
Map<String, String []> u2g_map = new HashMap<String, String []>(1);
u2g_map.put(fakeUsername, new String[] {fakeGroup});
DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
// Reset default lease periods
cluster.setLeasePeriod(HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD,
HdfsServerConstants.LEASE_HARDLIMIT_PERIOD);
//create a file
// create a random file name
String filestr = "/foo" + AppendTestUtil.nextInt();
AppendTestUtil.LOG.info("filestr=" + filestr);
Path filepath = new Path(filestr);
FSDataOutputStream stm = dfs.create(filepath, true,
BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE);
assertTrue(dfs.dfs.exists(filestr));
// write random number of bytes into it.
int size = AppendTestUtil.nextInt(FILE_SIZE);
AppendTestUtil.LOG.info("size=" + size);
stm.write(buffer, 0, size);
// hflush file
AppendTestUtil.LOG.info("hflush");
stm.hflush();
AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
dfs.dfs.getLeaseRenewer().interruptAndJoin();
// set the soft limit to be 1 second so that the
// namenode triggers lease recovery on next attempt to write-for-open.
cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD);
// try to re-open the file before closing the previous handle. This
// should fail but will trigger lease recovery.
{
UserGroupInformation ugi =
UserGroupInformation.createUserForTesting(fakeUsername,
new String [] { fakeGroup});
FileSystem dfs2 = DFSTestUtil.getFileSystemAs(ugi, conf);
boolean done = false;
for(int i = 0; i < 10 && !done; i++) {
AppendTestUtil.LOG.info("i=" + i);
try {
dfs2.create(filepath, false, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE);
fail("Creation of an existing file should never succeed.");
} catch (FileAlreadyExistsException ex) {
done = true;
} catch (AlreadyBeingCreatedException ex) {
AppendTestUtil.LOG.info("GOOD! got " + ex.getMessage());
} catch (IOException ioe) {
AppendTestUtil.LOG.warn("UNEXPECTED IOException", ioe);
}
if (!done) {
AppendTestUtil.LOG.info("sleep " + 5000 + "ms");
try {Thread.sleep(5000);} catch (InterruptedException e) {}
}
}
assertTrue(done);
}
AppendTestUtil.LOG.info("Lease for file " + filepath + " is recovered. "
+ "Validating its contents now...");
// verify that file-size matches
long fileSize = dfs.getFileStatus(filepath).getLen();
assertTrue("File should be " + size + " bytes, but is actually " +
" found to be " + fileSize + " bytes", fileSize == size);
// verify data
AppendTestUtil.LOG.info("File size is good. " +
"Now validating data and sizes from datanodes...");
AppendTestUtil.checkFullFile(dfs, filepath, size, buffer, filestr);
}
/**
* This test makes it so the client does not renew its lease and also
   * sets the hard lease expiration period to be short, thus triggering
* lease expiration to happen while the client is still alive. The test
* also causes the NN to restart after lease recovery has begun, but before
* the DNs have completed the blocks. This test verifies that when the NN
* comes back up, the client no longer holds the lease.
*
* The test makes sure that the lease recovery completes and the client
* fails if it continues to write to the file, even after NN restart.
*
* @throws Exception
*/
@Test
public void testHardLeaseRecoveryAfterNameNodeRestart() throws Exception {
hardLeaseRecoveryRestartHelper(false, -1);
}
@Test
public void testHardLeaseRecoveryAfterNameNodeRestart2() throws Exception {
hardLeaseRecoveryRestartHelper(false, 1535);
}
@Test
public void testHardLeaseRecoveryWithRenameAfterNameNodeRestart()
throws Exception {
hardLeaseRecoveryRestartHelper(true, -1);
}
public void hardLeaseRecoveryRestartHelper(boolean doRename, int size)
throws Exception {
if (size < 0) {
size = AppendTestUtil.nextInt(FILE_SIZE + 1);
}
//create a file
String fileStr = "/hardLeaseRecovery";
AppendTestUtil.LOG.info("filestr=" + fileStr);
Path filePath = new Path(fileStr);
FSDataOutputStream stm = dfs.create(filePath, true,
BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE);
assertTrue(dfs.dfs.exists(fileStr));
// write bytes into the file.
AppendTestUtil.LOG.info("size=" + size);
stm.write(buffer, 0, size);
String originalLeaseHolder = NameNodeAdapter.getLeaseHolderForPath(
cluster.getNameNode(), fileStr);
assertFalse("original lease holder should not be the NN",
originalLeaseHolder.equals(HdfsServerConstants.NAMENODE_LEASE_HOLDER));
// hflush file
AppendTestUtil.LOG.info("hflush");
stm.hflush();
// check visible length
final HdfsDataInputStream in = (HdfsDataInputStream)dfs.open(filePath);
Assert.assertEquals(size, in.getVisibleLength());
in.close();
if (doRename) {
fileStr += ".renamed";
Path renamedPath = new Path(fileStr);
assertTrue(dfs.rename(filePath, renamedPath));
filePath = renamedPath;
}
// kill the lease renewal thread
AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
dfs.dfs.getLeaseRenewer().interruptAndJoin();
// Make sure the DNs don't send a heartbeat for a while, so the blocks
// won't actually get completed during lease recovery.
for (DataNode dn : cluster.getDataNodes()) {
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
}
// set the hard limit to be 1 second
cluster.setLeasePeriod(LONG_LEASE_PERIOD, SHORT_LEASE_PERIOD);
// Make sure lease recovery begins.
Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL * 2);
checkLease(fileStr, size);
cluster.restartNameNode(false);
checkLease(fileStr, size);
// Let the DNs send heartbeats again.
for (DataNode dn : cluster.getDataNodes()) {
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
}
cluster.waitActive();
// set the hard limit to be 1 second, to initiate lease recovery.
cluster.setLeasePeriod(LONG_LEASE_PERIOD, SHORT_LEASE_PERIOD);
// wait for lease recovery to complete
LocatedBlocks locatedBlocks;
do {
Thread.sleep(SHORT_LEASE_PERIOD);
locatedBlocks = dfs.dfs.getLocatedBlocks(fileStr, 0L, size);
} while (locatedBlocks.isUnderConstruction());
assertEquals(size, locatedBlocks.getFileLength());
// make sure that the client can't write data anymore.
try {
stm.write('b');
stm.hflush();
fail("Should not be able to flush after we've lost the lease");
} catch (IOException e) {
LOG.info("Expceted exception on write/hflush", e);
}
try {
stm.close();
fail("Should not be able to close after we've lost the lease");
} catch (IOException e) {
LOG.info("Expected exception on close", e);
}
// verify data
AppendTestUtil.LOG.info(
"File size is good. Now validating sizes from datanodes...");
AppendTestUtil.checkFullFile(dfs, filePath, size, buffer, fileStr);
}
static void checkLease(String f, int size) {
final String holder = NameNodeAdapter.getLeaseHolderForPath(
cluster.getNameNode(), f);
if (size == 0) {
assertEquals("lease holder should null, file is closed", null, holder);
} else {
assertEquals("lease holder should now be the NN",
HdfsServerConstants.NAMENODE_LEASE_HOLDER, holder);
}
}
}
| 19,794 | 35.056466 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyShort;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.when;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.SocketTimeoutException;
import java.net.URI;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
import org.apache.hadoop.hdfs.client.HdfsUtils;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.retry.RetryPolicies.MultipleLinearRandomRetry;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.stubbing.answers.ThrowsException;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.common.base.Joiner;
/**
* These tests make sure that DFSClient retries fetching data from DFS
* properly in case of errors.
*/
public class TestDFSClientRetries {
private static final String ADDRESS = "0.0.0.0";
final static private int PING_INTERVAL = 1000;
final static private int MIN_SLEEP_TIME = 1000;
public static final Log LOG =
LogFactory.getLog(TestDFSClientRetries.class.getName());
static private Configuration conf = null;
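  /**
   * A minimal RPC server used by these tests; it can optionally sleep longer
   * than the client ping interval, and either echoes the request or returns a
   * newly constructed instance of the configured response class.
   */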
private static class TestServer extends Server {
private boolean sleep;
private Class<? extends Writable> responseClass;
public TestServer(int handlerCount, boolean sleep) throws IOException {
this(handlerCount, sleep, LongWritable.class, null);
}
public TestServer(int handlerCount, boolean sleep,
Class<? extends Writable> paramClass,
Class<? extends Writable> responseClass)
throws IOException {
super(ADDRESS, 0, paramClass, handlerCount, conf);
this.sleep = sleep;
this.responseClass = responseClass;
}
@Override
public Writable call(RPC.RpcKind rpcKind, String protocol, Writable param, long receiveTime)
throws IOException {
if (sleep) {
// sleep a bit
try {
Thread.sleep(PING_INTERVAL + MIN_SLEEP_TIME);
} catch (InterruptedException e) {}
}
if (responseClass != null) {
try {
return responseClass.newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
} else {
return param; // echo param as result
}
}
}
// writes 'len' bytes of data to out.
private static void writeData(OutputStream out, int len) throws IOException {
byte [] buf = new byte[4096*16];
while(len > 0) {
int toWrite = Math.min(len, buf.length);
out.write(buf, 0, toWrite);
len -= toWrite;
}
}
@Before
public void setupConf(){
conf = new HdfsConfiguration();
}
/**
   * This makes sure that when the DN closes the client's socket after the
   * client had successfully connected earlier, the data can still be fetched.
*/
@Test
public void testWriteTimeoutAtDataNode() throws IOException,
InterruptedException {
final int writeTimeout = 100; //milliseconds.
// set a very short write timeout for datanode, so that tests runs fast.
conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, writeTimeout);
// set a smaller block size
final int blockSize = 10*1024*1024;
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 1);
// set a small buffer size
final int bufferSize = 4096;
conf.setInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, bufferSize);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
Path filePath = new Path("/testWriteTimeoutAtDataNode");
OutputStream out = fs.create(filePath, true, bufferSize);
// write a 2 block file.
writeData(out, 2*blockSize);
out.close();
byte[] buf = new byte[1024*1024]; // enough to empty TCP buffers.
InputStream in = fs.open(filePath, bufferSize);
//first read a few bytes
IOUtils.readFully(in, buf, 0, bufferSize/2);
//now read few more chunks of data by sleeping in between :
for(int i=0; i<10; i++) {
Thread.sleep(2*writeTimeout); // force write timeout at the datanode.
// read enough to empty out socket buffers.
IOUtils.readFully(in, buf, 0, buf.length);
}
// successfully read with write timeout on datanodes.
in.close();
} finally {
cluster.shutdown();
}
}
// more tests related to different failure cases can be added here.
/**
* Verify that client will correctly give up after the specified number
* of times trying to add a block
*/
@SuppressWarnings({ "serial", "unchecked" })
@Test
public void testNotYetReplicatedErrors() throws IOException
{
final String exceptionMsg = "Nope, not replicated yet...";
final int maxRetries = 1; // Allow one retry (total of two calls)
conf.setInt(HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY, maxRetries);
NamenodeProtocols mockNN = mock(NamenodeProtocols.class);
Answer<Object> answer = new ThrowsException(new IOException()) {
int retryCount = 0;
@Override
public Object answer(InvocationOnMock invocation)
throws Throwable {
retryCount++;
System.out.println("addBlock has been called " + retryCount + " times");
if(retryCount > maxRetries + 1) // First call was not a retry
throw new IOException("Retried too many times: " + retryCount);
else
throw new RemoteException(NotReplicatedYetException.class.getName(),
exceptionMsg);
}
};
when(mockNN.addBlock(anyString(),
anyString(),
any(ExtendedBlock.class),
any(DatanodeInfo[].class),
anyLong(), any(String[].class))).thenAnswer(answer);
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0)).when(mockNN).getFileInfo(anyString());
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0))
.when(mockNN)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
final DFSClient client = new DFSClient(null, mockNN, conf, null);
OutputStream os = client.create("testfile", true);
os.write(20); // write one random byte
try {
os.close();
} catch (Exception e) {
assertTrue("Retries are not being stopped correctly: " + e.getMessage(),
e.getMessage().equals(exceptionMsg));
}
}
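  // A minimal, hedged sketch (not part of the original test): the retry count
  // lowered above via HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY
  // is the same knob a real client would raise to tolerate slowly replicating
  // blocks instead of giving up. Purely illustrative; it only reuses identifiers
  // already referenced in this file and is not called by any test.
  private static Configuration exampleRaiseAddBlockRetries(int retries) {
    Configuration c = new HdfsConfiguration();
    // Retry addBlock() up to 'retries' times when blocks are not yet replicated.
    c.setInt(HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
        retries);
    return c;
  }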
/**
* This tests that DFSInputStream failures are counted for a given read
* operation, and not over the lifetime of the stream. It is a regression
* test for HDFS-127.
*/
@Test
public void testFailuresArePerOperation() throws Exception
{
long fileSize = 4096;
Path file = new Path("/testFile");
// Set short retry timeouts so this test runs faster
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2 * 1000);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
NamenodeProtocols spyNN = spy(preSpyNN);
DFSClient client = new DFSClient(null, spyNN, conf, null);
int maxBlockAcquires = client.getConf().getMaxBlockAcquireFailures();
assertTrue(maxBlockAcquires > 0);
DFSTestUtil.createFile(fs, file, fileSize, (short)1, 12345L /*seed*/);
// If the client will retry maxBlockAcquires times, then if we fail
// any more than that number of times, the operation should entirely
// fail.
doAnswer(new FailNTimesAnswer(preSpyNN, maxBlockAcquires + 1))
.when(spyNN).getBlockLocations(anyString(), anyLong(), anyLong());
try {
IOUtils.copyBytes(client.open(file.toString()), new IOUtils.NullOutputStream(), conf,
true);
fail("Didn't get exception");
} catch (IOException ioe) {
DFSClient.LOG.info("Got expected exception", ioe);
}
// If we fail exactly that many times, then it should succeed.
doAnswer(new FailNTimesAnswer(preSpyNN, maxBlockAcquires))
.when(spyNN).getBlockLocations(anyString(), anyLong(), anyLong());
IOUtils.copyBytes(client.open(file.toString()), new IOUtils.NullOutputStream(), conf,
true);
DFSClient.LOG.info("Starting test case for failure reset");
// Now the tricky case - if we fail a few times on one read, then succeed,
// then fail some more on another read, it shouldn't fail.
doAnswer(new FailNTimesAnswer(preSpyNN, maxBlockAcquires))
.when(spyNN).getBlockLocations(anyString(), anyLong(), anyLong());
DFSInputStream is = client.open(file.toString());
byte buf[] = new byte[10];
IOUtils.readFully(is, buf, 0, buf.length);
DFSClient.LOG.info("First read successful after some failures.");
// Further reads at this point will succeed since it has the good block locations.
// So, force the block locations on this stream to be refreshed from bad info.
// When reading again, it should start from a fresh failure count, since
// we're starting a new operation on the user level.
doAnswer(new FailNTimesAnswer(preSpyNN, maxBlockAcquires))
.when(spyNN).getBlockLocations(anyString(), anyLong(), anyLong());
is.openInfo(true);
// Seek to beginning forces a reopen of the BlockReader - otherwise it'll
// just keep reading on the existing stream and the fact that we've poisoned
// the block info won't do anything.
is.seek(0);
IOUtils.readFully(is, buf, 0, buf.length);
} finally {
cluster.shutdown();
}
}
/**
* Test DFSClient can continue to function after renewLease RPC
* receives SocketTimeoutException.
*/
@Test
public void testLeaseRenewSocketTimeout() throws Exception
{
String file1 = "/testFile1";
String file2 = "/testFile2";
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2 * 1000);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
NamenodeProtocols spyNN = spy(cluster.getNameNodeRpc());
Mockito.doThrow(new SocketTimeoutException()).when(spyNN).renewLease(
Mockito.anyString());
DFSClient client = new DFSClient(null, spyNN, conf, null);
// Get hold of the lease renewer instance used by the client
LeaseRenewer leaseRenewer = client.getLeaseRenewer();
leaseRenewer.setRenewalTime(100);
OutputStream out1 = client.create(file1, false);
Mockito.verify(spyNN, timeout(10000).times(1)).renewLease(
Mockito.anyString());
verifyEmptyLease(leaseRenewer);
try {
out1.write(new byte[256]);
fail("existing output stream should be aborted");
} catch (IOException e) {
}
// Verify DFSClient can do read operation after renewLease aborted.
client.exists(file2);
// Verify DFSClient can do write operation after renewLease no longer
// throws SocketTimeoutException.
Mockito.doNothing().when(spyNN).renewLease(
Mockito.anyString());
leaseRenewer = client.getLeaseRenewer();
leaseRenewer.setRenewalTime(100);
OutputStream out2 = client.create(file2, false);
Mockito.verify(spyNN, timeout(10000).times(2)).renewLease(
Mockito.anyString());
out2.write(new byte[256]);
out2.close();
verifyEmptyLease(leaseRenewer);
} finally {
cluster.shutdown();
}
}
/**
* Test that getAdditionalBlock() and close() are idempotent. This allows
* a client to safely retry a call and still produce a correct
* file. See HDFS-3031.
*/
@Test
public void testIdempotentAllocateBlockAndClose() throws Exception {
final String src = "/testIdempotentAllocateBlock";
Path file = new Path(src);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
NamenodeProtocols spyNN = spy(preSpyNN);
DFSClient client = new DFSClient(null, spyNN, conf, null);
// Make the call to addBlock() get called twice, as if it were retried
// due to an IPC issue.
doAnswer(new Answer<LocatedBlock>() {
@Override
public LocatedBlock answer(InvocationOnMock invocation) throws Throwable {
LocatedBlock ret = (LocatedBlock) invocation.callRealMethod();
LocatedBlocks lb = cluster.getNameNodeRpc().getBlockLocations(src, 0, Long.MAX_VALUE);
int blockCount = lb.getLocatedBlocks().size();
assertEquals(lb.getLastLocatedBlock().getBlock(), ret.getBlock());
// Retrying should result in a new block at the end of the file.
// (abandoning the old one)
LocatedBlock ret2 = (LocatedBlock) invocation.callRealMethod();
lb = cluster.getNameNodeRpc().getBlockLocations(src, 0, Long.MAX_VALUE);
int blockCount2 = lb.getLocatedBlocks().size();
assertEquals(lb.getLastLocatedBlock().getBlock(), ret2.getBlock());
// We shouldn't have gained an extra block by the RPC.
assertEquals(blockCount, blockCount2);
return ret2;
}
}).when(spyNN).addBlock(Mockito.anyString(), Mockito.anyString(),
Mockito.<ExtendedBlock> any(), Mockito.<DatanodeInfo[]> any(),
Mockito.anyLong(), Mockito.<String[]> any());
doAnswer(new Answer<Boolean>() {
@Override
public Boolean answer(InvocationOnMock invocation) throws Throwable {
// complete() may return false a few times before it returns
// true. We want to wait until it returns true, and then
// make it retry one more time after that.
LOG.info("Called complete(: " +
Joiner.on(",").join(invocation.getArguments()) + ")");
if (!(Boolean)invocation.callRealMethod()) {
LOG.info("Complete call returned false, not faking a retry RPC");
return false;
}
// We got a successful close. Call it again to check idempotence.
try {
boolean ret = (Boolean) invocation.callRealMethod();
LOG.info("Complete call returned true, faked second RPC. " +
"Returned: " + ret);
return ret;
} catch (Throwable t) {
LOG.error("Idempotent retry threw exception", t);
throw t;
}
}
}).when(spyNN).complete(Mockito.anyString(), Mockito.anyString(),
Mockito.<ExtendedBlock>any(), anyLong());
OutputStream stm = client.create(file.toString(), true);
try {
AppendTestUtil.write(stm, 0, 10000);
stm.close();
stm = null;
} finally {
IOUtils.cleanup(LOG, stm);
}
// Make sure the mock was actually properly injected.
Mockito.verify(spyNN, Mockito.atLeastOnce()).addBlock(
Mockito.anyString(), Mockito.anyString(),
Mockito.<ExtendedBlock> any(), Mockito.<DatanodeInfo[]> any(),
Mockito.anyLong(), Mockito.<String[]> any());
Mockito.verify(spyNN, Mockito.atLeastOnce()).complete(
Mockito.anyString(), Mockito.anyString(),
Mockito.<ExtendedBlock>any(), anyLong());
AppendTestUtil.check(fs, file, 10000);
} finally {
cluster.shutdown();
}
}
/**
* Mock Answer implementation of NN.getBlockLocations that will return
* a poisoned block list a certain number of times before returning
* a proper one.
*/
private static class FailNTimesAnswer implements Answer<LocatedBlocks> {
private int failuresLeft;
private final NamenodeProtocols realNN;
public FailNTimesAnswer(NamenodeProtocols preSpyNN, int timesToFail) {
failuresLeft = timesToFail;
this.realNN = preSpyNN;
}
@Override
public LocatedBlocks answer(InvocationOnMock invocation) throws IOException {
Object args[] = invocation.getArguments();
LocatedBlocks realAnswer = realNN.getBlockLocations(
(String)args[0],
(Long)args[1],
(Long)args[2]);
if (failuresLeft-- > 0) {
NameNode.LOG.info("FailNTimesAnswer injecting failure.");
return makeBadBlockList(realAnswer);
}
NameNode.LOG.info("FailNTimesAnswer no longer failing.");
return realAnswer;
}
private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
LocatedBlock goodLocatedBlock = goodBlockList.get(0);
LocatedBlock badLocatedBlock = new LocatedBlock(
goodLocatedBlock.getBlock(),
new DatanodeInfo[] {
DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
});
badLocatedBlock.setStartOffset(goodLocatedBlock.getStartOffset());
List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
badBlocks.add(badLocatedBlock);
return new LocatedBlocks(goodBlockList.getFileLength(), false,
badBlocks, null, true,
null);
}
}
/**
* Test that a DFSClient waits for random time before retry on busy blocks.
*/
@Test
public void testDFSClientRetriesOnBusyBlocks() throws IOException {
System.out.println("Testing DFSClient random waiting on busy blocks.");
//
// Test settings:
//
    //           xcievers  fileLen  #clients  timeWindow  #retries
    //           ========  =======  ========  ==========  ========
    // Test 1:       2      6 MB       50       300 ms        3
    // Test 2:       2      6 MB       50       300 ms       50
    // Test 3:       2      6 MB       50      1000 ms        3
    // Test 4:       2      6 MB       50      1000 ms       50
//
// Minimum xcievers is 2 since 1 thread is reserved for registry.
// Test 1 & 3 may fail since # retries is low.
// Test 2 & 4 should never fail since (#threads)/(xcievers-1) is the upper
// bound for guarantee to not throw BlockMissingException.
//
int xcievers = 2;
int fileLen = 6*1024*1024;
int threads = 50;
int retries = 3;
int timeWin = 300;
//
// Test 1: might fail
//
long timestamp = Time.now();
boolean pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
long timestamp2 = Time.now();
if ( pass ) {
LOG.info("Test 1 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
} else {
LOG.warn("Test 1 failed, but relax. Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
}
//
// Test 2: should never fail
//
retries = 50;
timestamp = Time.now();
pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
timestamp2 = Time.now();
assertTrue("Something wrong! Test 2 got Exception with maxmum retries!", pass);
LOG.info("Test 2 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
//
// Test 3: might fail
//
retries = 3;
timeWin = 1000;
timestamp = Time.now();
pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
timestamp2 = Time.now();
if ( pass ) {
LOG.info("Test 3 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
} else {
LOG.warn("Test 3 failed, but relax. Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
}
//
// Test 4: should never fail
//
retries = 50;
timeWin = 1000;
timestamp = Time.now();
pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
timestamp2 = Time.now();
assertTrue("Something wrong! Test 4 got Exception with maxmum retries!", pass);
LOG.info("Test 4 succeeded! Time spent: " + (timestamp2-timestamp)/1000.0 + " sec.");
}
private boolean busyTest(int xcievers, int threads, int fileLen, int timeWin, int retries)
throws IOException {
boolean ret = true;
short replicationFactor = 1;
long blockSize = 128*1024*1024; // DFS block size
int bufferSize = 4096;
conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY, xcievers);
conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
retries);
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, timeWin);
// Disable keepalive
conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, 0);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(replicationFactor).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
Path file1 = new Path("test_data.dat");
file1 = file1.makeQualified(fs.getUri(), fs.getWorkingDirectory()); // make URI hdfs://
try {
FSDataOutputStream stm = fs.create(file1, true,
bufferSize,
replicationFactor,
blockSize);
// verify that file exists in FS namespace
assertTrue(file1 + " should be a file",
fs.getFileStatus(file1).isFile());
System.out.println("Path : \"" + file1 + "\"");
LOG.info("Path : \"" + file1 + "\"");
// write 1 block to file
byte[] buffer = AppendTestUtil.randomBytes(Time.now(), fileLen);
stm.write(buffer, 0, fileLen);
stm.close();
// verify that file size has changed to the full size
long len = fs.getFileStatus(file1).getLen();
assertTrue(file1 + " should be of size " + fileLen +
" but found to be of size " + len,
len == fileLen);
      // read back and check data integrity
byte[] read_buf = new byte[fileLen];
InputStream in = fs.open(file1, fileLen);
IOUtils.readFully(in, read_buf, 0, fileLen);
assert(Arrays.equals(buffer, read_buf));
in.close();
read_buf = null; // GC it if needed
// compute digest of the content to reduce memory space
MessageDigest m = MessageDigest.getInstance("SHA");
m.update(buffer, 0, fileLen);
byte[] hash_sha = m.digest();
// spawn multiple threads and all trying to access the same block
Thread[] readers = new Thread[threads];
Counter counter = new Counter(0);
for (int i = 0; i < threads; ++i ) {
DFSClientReader reader = new DFSClientReader(file1, cluster, hash_sha, fileLen, counter);
readers[i] = new Thread(reader);
readers[i].start();
}
// wait for them to exit
for (int i = 0; i < threads; ++i ) {
readers[i].join();
}
      ret = (counter.get() == threads);
} catch (InterruptedException e) {
System.out.println("Thread got InterruptedException.");
e.printStackTrace();
ret = false;
} catch (Exception e) {
e.printStackTrace();
ret = false;
} finally {
fs.delete(file1, false);
cluster.shutdown();
}
return ret;
}
private void verifyEmptyLease(LeaseRenewer leaseRenewer) throws Exception {
int sleepCount = 0;
while (!leaseRenewer.isEmpty() && sleepCount++ < 20) {
Thread.sleep(500);
}
assertTrue("Lease should be empty.", leaseRenewer.isEmpty());
}
class DFSClientReader implements Runnable {
DFSClient client;
final Configuration conf;
final byte[] expected_sha;
FileSystem fs;
final Path filePath;
final MiniDFSCluster cluster;
final int len;
final Counter counter;
DFSClientReader(Path file, MiniDFSCluster cluster, byte[] hash_sha, int fileLen, Counter cnt) {
filePath = file;
this.cluster = cluster;
counter = cnt;
len = fileLen;
conf = new HdfsConfiguration();
expected_sha = hash_sha;
try {
cluster.waitActive();
} catch (IOException e) {
e.printStackTrace();
}
}
@Override
public void run() {
try {
fs = cluster.getNewFileSystemInstance(0);
int bufferSize = len;
byte[] buf = new byte[bufferSize];
InputStream in = fs.open(filePath, bufferSize);
// read the whole file
IOUtils.readFully(in, buf, 0, bufferSize);
// compare with the expected input
MessageDigest m = MessageDigest.getInstance("SHA");
m.update(buf, 0, bufferSize);
byte[] hash_sha = m.digest();
buf = null; // GC if needed since there may be too many threads
in.close();
fs.close();
assertTrue("hashed keys are not the same size",
hash_sha.length == expected_sha.length);
assertTrue("hashed keys are not equal",
Arrays.equals(hash_sha, expected_sha));
counter.inc(); // count this thread as successful
LOG.info("Thread correctly read the block.");
} catch (BlockMissingException e) {
LOG.info("Bad - BlockMissingException is caught.");
e.printStackTrace();
} catch (Exception e) {
e.printStackTrace();
}
}
}
class Counter {
int counter;
Counter(int n) { counter = n; }
public synchronized void inc() { ++counter; }
public int get() { return counter; }
}
@Test
public void testGetFileChecksum() throws Exception {
final String f = "/testGetFileChecksum";
final Path p = new Path(f);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
cluster.waitActive();
//create a file
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, p, 1L << 20, (short)3, 20100402L);
//get checksum
final FileChecksum cs1 = fs.getFileChecksum(p);
assertTrue(cs1 != null);
//stop the first datanode
final List<LocatedBlock> locatedblocks = DFSClient.callGetBlockLocations(
cluster.getNameNodeRpc(), f, 0, Long.MAX_VALUE)
.getLocatedBlocks();
final DatanodeInfo first = locatedblocks.get(0).getLocations()[0];
cluster.stopDataNode(first.getXferAddr());
//get checksum again
final FileChecksum cs2 = fs.getFileChecksum(p);
assertEquals(cs1, cs2);
} finally {
cluster.shutdown();
}
}
  /** Test that a timeout occurs when the DN does not respond to an RPC.
   * Start up a server and ask it to sleep for n seconds. Make an
   * RPC to the server with rpcTimeout set to less than n and ensure
   * that a SocketTimeoutException is obtained.
   */
@Test
public void testClientDNProtocolTimeout() throws IOException {
final Server server = new TestServer(1, true);
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
ClientDatanodeProtocol proxy = null;
try {
proxy = DFSUtil.createClientDatanodeProtocolProxy(
fakeDnId, conf, 500, false, fakeBlock);
proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
fail ("Did not get expected exception: SocketTimeoutException");
} catch (SocketTimeoutException e) {
LOG.info("Got the expected Exception: SocketTimeoutException");
} finally {
if (proxy != null) {
RPC.stopProxy(proxy);
}
server.stop();
}
}
/** Test client retry with namenode restarting. */
@Test(timeout=300000)
public void testNamenodeRestart() throws Exception {
namenodeRestartTest(new Configuration(), false);
}
public static void namenodeRestartTest(final Configuration conf,
final boolean isWebHDFS) throws Exception {
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
final List<Exception> exceptions = new ArrayList<Exception>();
final Path dir = new Path("/testNamenodeRestart");
if (isWebHDFS) {
conf.setBoolean(HdfsClientConfigKeys.HttpClient.RETRY_POLICY_ENABLED_KEY, true);
} else {
conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, true);
}
conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
conf.setInt(MiniDFSCluster.DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 5000);
final short numDatanodes = 3;
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDatanodes)
.build();
try {
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final FileSystem fs = isWebHDFS ? WebHdfsTestUtil.getWebHdfsFileSystem(
conf, WebHdfsConstants.WEBHDFS_SCHEME) : dfs;
final URI uri = dfs.getUri();
assertTrue(HdfsUtils.isHealthy(uri));
//create a file
final long length = 1L << 20;
final Path file1 = new Path(dir, "foo");
DFSTestUtil.createFile(fs, file1, length, numDatanodes, 20120406L);
//get file status
final FileStatus s1 = fs.getFileStatus(file1);
assertEquals(length, s1.getLen());
//create file4, write some data but not close
final Path file4 = new Path(dir, "file4");
final FSDataOutputStream out4 = fs.create(file4, false, 4096,
fs.getDefaultReplication(file4), 1024L, null);
final byte[] bytes = new byte[1000];
new Random().nextBytes(bytes);
out4.write(bytes);
out4.write(bytes);
if (isWebHDFS) {
// WebHDFS does not support hflush. To avoid DataNode communicating with
// NN while we're shutting down NN, we call out4.close() to finish
// writing the data
out4.close();
} else {
out4.hflush();
}
//shutdown namenode
assertTrue(HdfsUtils.isHealthy(uri));
cluster.shutdownNameNode(0);
assertFalse(HdfsUtils.isHealthy(uri));
//namenode is down, continue writing file4 in a thread
final Thread file4thread = new Thread(new Runnable() {
@Override
public void run() {
try {
//write some more data and then close the file
if (!isWebHDFS) {
out4.write(bytes);
out4.write(bytes);
out4.write(bytes);
out4.close();
}
} catch (Exception e) {
exceptions.add(e);
}
}
});
file4thread.start();
//namenode is down, read the file in a thread
final Thread reader = new Thread(new Runnable() {
@Override
public void run() {
try {
//it should retry till namenode is up.
final FileSystem fs = createFsWithDifferentUsername(conf, isWebHDFS);
final FSDataInputStream in = fs.open(file1);
int count = 0;
for(; in.read() != -1; count++);
in.close();
assertEquals(s1.getLen(), count);
} catch (Exception e) {
exceptions.add(e);
}
}
});
reader.start();
//namenode is down, create another file in a thread
final Path file3 = new Path(dir, "file");
final Thread thread = new Thread(new Runnable() {
@Override
public void run() {
try {
//it should retry till namenode is up.
final FileSystem fs = createFsWithDifferentUsername(conf, isWebHDFS);
DFSTestUtil.createFile(fs, file3, length, numDatanodes, 20120406L);
} catch (Exception e) {
exceptions.add(e);
}
}
});
thread.start();
//restart namenode in a new thread
new Thread(new Runnable() {
@Override
public void run() {
try {
//sleep, restart, and then wait active
TimeUnit.SECONDS.sleep(30);
assertFalse(HdfsUtils.isHealthy(uri));
cluster.restartNameNode(0, false);
cluster.waitActive();
assertTrue(HdfsUtils.isHealthy(uri));
} catch (Exception e) {
exceptions.add(e);
}
}
}).start();
//namenode is down, it should retry until namenode is up again.
final FileStatus s2 = fs.getFileStatus(file1);
assertEquals(s1, s2);
//check file1 and file3
thread.join();
assertEmpty(exceptions);
assertEquals(s1.getLen(), fs.getFileStatus(file3).getLen());
assertEquals(fs.getFileChecksum(file1), fs.getFileChecksum(file3));
reader.join();
assertEmpty(exceptions);
//check file4
file4thread.join();
assertEmpty(exceptions);
{
final FSDataInputStream in = fs.open(file4);
int count = 0;
for(int r; (r = in.read()) != -1; count++) {
Assert.assertEquals(String.format("count=%d", count),
bytes[count % bytes.length], (byte)r);
}
if (!isWebHDFS) {
Assert.assertEquals(5 * bytes.length, count);
} else {
Assert.assertEquals(2 * bytes.length, count);
}
in.close();
}
//enter safe mode
assertTrue(HdfsUtils.isHealthy(uri));
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
assertFalse(HdfsUtils.isHealthy(uri));
//leave safe mode in a new thread
new Thread(new Runnable() {
@Override
public void run() {
try {
//sleep and then leave safe mode
TimeUnit.SECONDS.sleep(30);
assertFalse(HdfsUtils.isHealthy(uri));
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
assertTrue(HdfsUtils.isHealthy(uri));
} catch (Exception e) {
exceptions.add(e);
}
}
}).start();
//namenode is in safe mode, create should retry until it leaves safe mode.
final Path file2 = new Path(dir, "bar");
DFSTestUtil.createFile(fs, file2, length, numDatanodes, 20120406L);
assertEquals(fs.getFileChecksum(file1), fs.getFileChecksum(file2));
assertTrue(HdfsUtils.isHealthy(uri));
//make sure it won't retry on exceptions like FileNotFoundException
final Path nonExisting = new Path(dir, "nonExisting");
LOG.info("setPermission: " + nonExisting);
try {
fs.setPermission(nonExisting, new FsPermission((short)0));
fail();
} catch(FileNotFoundException fnfe) {
LOG.info("GOOD!", fnfe);
}
assertEmpty(exceptions);
} finally {
cluster.shutdown();
}
}
static void assertEmpty(final List<Exception> exceptions) {
if (!exceptions.isEmpty()) {
final StringBuilder b = new StringBuilder("There are ")
.append(exceptions.size())
.append(" exception(s):");
for(int i = 0; i < exceptions.size(); i++) {
b.append("\n Exception ")
.append(i)
.append(": ")
.append(StringUtils.stringifyException(exceptions.get(i)));
}
fail(b.toString());
}
}
private static FileSystem createFsWithDifferentUsername(
final Configuration conf, final boolean isWebHDFS
) throws IOException, InterruptedException {
final String username = UserGroupInformation.getCurrentUser(
).getShortUserName() + "_XXX";
final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
username, new String[]{"supergroup"});
return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsConstants.WEBHDFS_SCHEME)
: DFSTestUtil.getFileSystemAs(ugi, conf);
}
@Test
public void testMultipleLinearRandomRetry() {
parseMultipleLinearRandomRetry(null, "");
parseMultipleLinearRandomRetry(null, "11");
parseMultipleLinearRandomRetry(null, "11,22,33");
parseMultipleLinearRandomRetry(null, "11,22,33,44,55");
parseMultipleLinearRandomRetry(null, "AA");
parseMultipleLinearRandomRetry(null, "11,AA");
parseMultipleLinearRandomRetry(null, "11,22,33,FF");
parseMultipleLinearRandomRetry(null, "11,-22");
parseMultipleLinearRandomRetry(null, "-11,22");
parseMultipleLinearRandomRetry("[22x11ms]",
"11,22");
parseMultipleLinearRandomRetry("[22x11ms, 44x33ms]",
"11,22,33,44");
parseMultipleLinearRandomRetry("[22x11ms, 44x33ms, 66x55ms]",
"11,22,33,44,55,66");
parseMultipleLinearRandomRetry("[22x11ms, 44x33ms, 66x55ms]",
" 11, 22, 33, 44, 55, 66 ");
}
static void parseMultipleLinearRandomRetry(String expected, String s) {
final MultipleLinearRandomRetry r = MultipleLinearRandomRetry.parseCommaSeparatedString(s);
LOG.info("input=" + s + ", parsed=" + r + ", expected=" + expected);
if (r == null) {
assertEquals(expected, null);
} else {
assertEquals("MultipleLinearRandomRetry" + expected, r.toString());
}
}
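  // A minimal, hedged sketch (not part of the original tests): as the assertions
  // above suggest, the spec string is a comma-separated list of
  // (sleepTimeMs, numberOfRetries) pairs, so "500,15,10000,5" means retry 15
  // times sleeping ~500ms each, then 5 more times sleeping ~10s each. This only
  // re-exercises the parser already used by parseMultipleLinearRandomRetry().
  static void demoRetrySpecParsing() {
    final MultipleLinearRandomRetry policy =
        MultipleLinearRandomRetry.parseCommaSeparatedString("500,15,10000,5");
    // Per the assertions above, toString() should render as
    // "MultipleLinearRandomRetry[15x500ms, 5x10000ms]".
    LOG.info("Parsed example retry spec: " + policy);
  }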
/**
* Test that checksum failures are recovered from by the next read on the same
* DFSInputStream. Corruption information is not persisted from read call to
* read call, so the client should expect consecutive calls to behave the same
* way. See HDFS-3067.
*/
@Test
public void testRetryOnChecksumFailure() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
final short REPL_FACTOR = 1;
final long FILE_LENGTH = 512L;
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
Path path = new Path("/corrupted");
DFSTestUtil.createFile(fs, path, FILE_LENGTH, REPL_FACTOR, 12345L);
DFSTestUtil.waitReplication(fs, path, REPL_FACTOR);
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, path);
int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
assertEquals("All replicas not corrupted", REPL_FACTOR,
blockFilesCorrupted);
InetSocketAddress nnAddr =
new InetSocketAddress("localhost", cluster.getNameNodePort());
DFSClient client = new DFSClient(nnAddr, conf);
DFSInputStream dis = client.open(path.toString());
byte[] arr = new byte[(int)FILE_LENGTH];
for (int i = 0; i < 2; ++i) {
try {
dis.read(arr, 0, (int)FILE_LENGTH);
fail("Expected ChecksumException not thrown");
} catch (Exception ex) {
GenericTestUtils.assertExceptionContains(
"Checksum error", ex);
}
}
} finally {
cluster.shutdown();
}
}
@Test
public void testDFSClientConfigurationLocateFollowingBlockInitialDelay()
throws Exception {
    // If HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY
    // is not configured, verify that DFSClient uses the default value 400.
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
NamenodeProtocols nn = cluster.getNameNodeRpc();
DFSClient client = new DFSClient(null, nn, conf, null);
assertEquals(client.getConf().
getBlockWriteLocateFollowingInitialDelayMs(), 400);
      // Change HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY
      // and verify that DFSClient uses the configured value 1000.
conf.setInt(
HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY,
1000);
client = new DFSClient(null, nn, conf, null);
assertEquals(client.getConf().
getBlockWriteLocateFollowingInitialDelayMs(), 1000);
} finally {
cluster.shutdown();
}
}
}
| 44,553 | 35.639803 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.net.URI;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.MD5Hash;
import org.junit.AfterClass;
import org.junit.Test;
public class TestFetchImage {
private static final File FETCHED_IMAGE_FILE = new File(
System.getProperty("test.build.dir"), "target/fetched-image-dir");
// Shamelessly stolen from NNStorage.
private static final Pattern IMAGE_REGEX = Pattern.compile("fsimage_(\\d+)");
@AfterClass
public static void cleanup() {
FileUtil.fullyDelete(FETCHED_IMAGE_FILE);
}
/**
* Download a few fsimages using `hdfs dfsadmin -fetchImage ...' and verify
* the results.
*/
@Test
public void testFetchImage() throws Exception {
FETCHED_IMAGE_FILE.mkdirs();
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = null;
try {
DFSAdmin dfsAdmin = new DFSAdmin();
dfsAdmin.setConf(conf);
runFetchImage(dfsAdmin, cluster);
fs = cluster.getFileSystem();
fs.mkdirs(new Path("/foo"));
fs.mkdirs(new Path("/foo2"));
fs.mkdirs(new Path("/foo3"));
cluster.getNameNodeRpc()
.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
cluster.getNameNodeRpc().saveNamespace();
cluster.getNameNodeRpc()
.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
runFetchImage(dfsAdmin, cluster);
} finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
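  // A hedged aside (not part of the original test): runFetchImage below drives
  // DFSAdmin programmatically; the shell equivalent is
  //   hdfs dfsadmin -fetchImage <local-dir>
  // This helper only shows the argv that DFSAdmin#run receives for that command
  // and is not invoked by the test.
  private static String[] exampleFetchImageArgs(File destDir) {
    return new String[] { "-fetchImage", destDir.getPath() };
  }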
/**
* Run `hdfs dfsadmin -fetchImage ...' and verify that the downloaded image is
* correct.
*/
private static void runFetchImage(DFSAdmin dfsAdmin, MiniDFSCluster cluster)
throws Exception {
int retVal = dfsAdmin.run(new String[]{"-fetchImage",
FETCHED_IMAGE_FILE.getPath() });
assertEquals(0, retVal);
File highestImageOnNn = getHighestFsImageOnCluster(cluster);
MD5Hash expected = MD5FileUtils.computeMd5ForFile(highestImageOnNn);
MD5Hash actual = MD5FileUtils.computeMd5ForFile(
new File(FETCHED_IMAGE_FILE, highestImageOnNn.getName()));
assertEquals(expected, actual);
}
/**
* @return the fsimage with highest transaction ID in the cluster.
*/
private static File getHighestFsImageOnCluster(MiniDFSCluster cluster) {
long highestImageTxId = -1;
File highestImageOnNn = null;
for (URI nameDir : cluster.getNameDirs(0)) {
for (File imageFile : new File(new File(nameDir), "current").listFiles()) {
Matcher imageMatch = IMAGE_REGEX.matcher(imageFile.getName());
if (imageMatch.matches()) {
long imageTxId = Long.parseLong(imageMatch.group(1));
if (imageTxId > highestImageTxId) {
highestImageTxId = imageTxId;
highestImageOnNn = imageFile;
}
}
}
}
return highestImageOnNn;
}
}
| 4,221 | 31.984375 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.junit.Ignore;
import org.junit.Test;
/**
* Driver class for testing the use of DFSInputStream by multiple concurrent
* readers, using the different read APIs.
*
* This class is marked as @Ignore so that junit doesn't try to execute the
* tests in here directly. They are executed from subclasses.
*/
@Ignore
public class TestParallelReadUtil {
static final Log LOG = LogFactory.getLog(TestParallelReadUtil.class);
static BlockReaderTestUtil util = null;
static DFSClient dfsClient = null;
static final int FILE_SIZE_K = 256;
static Random rand = null;
static final int DEFAULT_REPLICATION_FACTOR = 2;
protected boolean verifyChecksums = true;
static {
// The client-trace log ends up causing a lot of blocking threads
    // in this test when it's being used as a performance benchmark.
LogManager.getLogger(DataNode.class.getName() + ".clienttrace")
.setLevel(Level.WARN);
}
private class TestFileInfo {
public DFSInputStream dis;
public Path filepath;
public byte[] authenticData;
}
public static void setupCluster(int replicationFactor, HdfsConfiguration conf) throws Exception {
util = new BlockReaderTestUtil(replicationFactor, conf);
dfsClient = util.getDFSClient();
long seed = Time.now();
LOG.info("Random seed: " + seed);
rand = new Random(seed);
}
/**
* Providers of this interface implement two different read APIs. Instances of
   * this interface are shared across all ReadWorkerThreads, so they should be stateless.
*/
static interface ReadWorkerHelper {
public int read(DFSInputStream dis, byte[] target, int startOff, int len) throws IOException;
public int pRead(DFSInputStream dis, byte[] target, int startOff, int len) throws IOException;
}
/**
* Uses read(ByteBuffer...) style APIs
*/
static class DirectReadWorkerHelper implements ReadWorkerHelper {
@Override
public int read(DFSInputStream dis, byte[] target, int startOff, int len) throws IOException {
ByteBuffer bb = ByteBuffer.allocateDirect(target.length);
int cnt = 0;
synchronized(dis) {
dis.seek(startOff);
while (cnt < len) {
int read = dis.read(bb);
if (read == -1) {
return read;
}
cnt += read;
}
}
bb.clear();
bb.get(target);
return cnt;
}
@Override
public int pRead(DFSInputStream dis, byte[] target, int startOff, int len) throws IOException {
// No pRead for bb read path
return read(dis, target, startOff, len);
}
}
/**
* Uses the read(byte[]...) style APIs
*/
static class CopyingReadWorkerHelper implements ReadWorkerHelper {
@Override
public int read(DFSInputStream dis, byte[] target, int startOff, int len)
throws IOException {
int cnt = 0;
synchronized(dis) {
dis.seek(startOff);
while (cnt < len) {
int read = dis.read(target, cnt, len - cnt);
if (read == -1) {
return read;
}
cnt += read;
}
}
return cnt;
}
@Override
public int pRead(DFSInputStream dis, byte[] target, int startOff, int len)
throws IOException {
int cnt = 0;
while (cnt < len) {
int read = dis.read(startOff, target, cnt, len - cnt);
if (read == -1) {
return read;
}
cnt += read;
}
return cnt;
}
}
/**
   * Uses a mix of both the copying and direct (ByteBuffer) read helpers.
*/
static class MixedWorkloadHelper implements ReadWorkerHelper {
private final DirectReadWorkerHelper bb = new DirectReadWorkerHelper();
private final CopyingReadWorkerHelper copy = new CopyingReadWorkerHelper();
private final double COPYING_PROBABILITY = 0.5;
@Override
public int read(DFSInputStream dis, byte[] target, int startOff, int len)
throws IOException {
double p = rand.nextDouble();
if (p > COPYING_PROBABILITY) {
return bb.read(dis, target, startOff, len);
} else {
return copy.read(dis, target, startOff, len);
}
}
@Override
public int pRead(DFSInputStream dis, byte[] target, int startOff, int len)
throws IOException {
double p = rand.nextDouble();
if (p > COPYING_PROBABILITY) {
return bb.pRead(dis, target, startOff, len);
} else {
return copy.pRead(dis, target, startOff, len);
}
}
}
/**
* A worker to do one "unit" of read.
*/
static class ReadWorker extends Thread {
static public final int N_ITERATIONS = 1024;
private static final double PROPORTION_NON_POSITIONAL_READ = 0.10;
private final TestFileInfo testInfo;
private final long fileSize;
private long bytesRead;
private boolean error;
private final ReadWorkerHelper helper;
ReadWorker(TestFileInfo testInfo, int id, ReadWorkerHelper helper) {
super("ReadWorker-" + id + "-" + testInfo.filepath.toString());
this.testInfo = testInfo;
this.helper = helper;
fileSize = testInfo.dis.getFileLength();
assertEquals(fileSize, testInfo.authenticData.length);
bytesRead = 0;
error = false;
}
/**
* Randomly do one of (1) Small read; and (2) Large Pread.
*/
@Override
public void run() {
for (int i = 0; i < N_ITERATIONS; ++i) {
int startOff = rand.nextInt((int) fileSize);
int len = 0;
try {
double p = rand.nextDouble();
if (p < PROPORTION_NON_POSITIONAL_READ) {
// Do a small regular read. Very likely this will leave unread
// data on the socket and make the socket uncacheable.
len = Math.min(rand.nextInt(64), (int) fileSize - startOff);
read(startOff, len);
bytesRead += len;
} else {
// Do a positional read most of the time.
len = rand.nextInt((int) (fileSize - startOff));
pRead(startOff, len);
bytesRead += len;
}
} catch (Throwable t) {
LOG.error(getName() + ": Error while testing read at " + startOff +
" length " + len, t);
error = true;
fail(t.getMessage());
}
}
}
public long getBytesRead() {
return bytesRead;
}
/**
     * Raising an error in a worker thread doesn't seem to fail the test,
     * so check for errors afterwards.
*/
public boolean hasError() {
return error;
}
static int readCount = 0;
/**
* Seek to somewhere random and read.
*/
private void read(int start, int len) throws Exception {
assertTrue(
"Bad args: " + start + " + " + len + " should be <= " + fileSize,
start + len <= fileSize);
readCount++;
DFSInputStream dis = testInfo.dis;
byte buf[] = new byte[len];
helper.read(dis, buf, start, len);
verifyData("Read data corrupted", buf, start, start + len);
}
/**
* Positional read.
*/
private void pRead(int start, int len) throws Exception {
assertTrue(
"Bad args: " + start + " + " + len + " should be <= " + fileSize,
start + len <= fileSize);
DFSInputStream dis = testInfo.dis;
byte buf[] = new byte[len];
helper.pRead(dis, buf, start, len);
verifyData("Pread data corrupted", buf, start, start + len);
}
/**
* Verify read data vs authentic data
*/
private void verifyData(String msg, byte actual[], int start, int end)
throws Exception {
byte auth[] = testInfo.authenticData;
if (end > auth.length) {
throw new Exception(msg + ": Actual array (" + end +
") is past the end of authentic data (" +
auth.length + ")");
}
int j = start;
for (int i = 0; i < actual.length; ++i, ++j) {
if (auth[j] != actual[i]) {
throw new Exception(msg + ": Arrays byte " + i + " (at offset " +
j + ") differs: expect " +
auth[j] + " got " + actual[i]);
}
}
}
}
/**
* Start the parallel read with the given parameters.
*/
boolean runParallelRead(int nFiles, int nWorkerEach, ReadWorkerHelper helper) throws IOException {
ReadWorker workers[] = new ReadWorker[nFiles * nWorkerEach];
TestFileInfo testInfoArr[] = new TestFileInfo[nFiles];
// Prepare the files and workers
int nWorkers = 0;
for (int i = 0; i < nFiles; ++i) {
TestFileInfo testInfo = new TestFileInfo();
testInfoArr[i] = testInfo;
testInfo.filepath = new Path("/TestParallelRead.dat." + i);
testInfo.authenticData = util.writeFile(testInfo.filepath, FILE_SIZE_K);
testInfo.dis = dfsClient.open(testInfo.filepath.toString(),
dfsClient.getConf().getIoBufferSize(), verifyChecksums);
for (int j = 0; j < nWorkerEach; ++j) {
workers[nWorkers++] = new ReadWorker(testInfo, nWorkers, helper);
}
}
// Start the workers and wait
long starttime = Time.monotonicNow();
for (ReadWorker worker : workers) {
worker.start();
}
for (ReadWorker worker : workers) {
try {
worker.join();
} catch (InterruptedException ignored) { }
}
long endtime = Time.monotonicNow();
// Cleanup
for (TestFileInfo testInfo : testInfoArr) {
testInfo.dis.close();
}
// Report
boolean res = true;
long totalRead = 0;
for (ReadWorker worker : workers) {
long nread = worker.getBytesRead();
LOG.info("--- Report: " + worker.getName() + " read " + nread + " B; " +
"average " + nread / ReadWorker.N_ITERATIONS + " B per read");
totalRead += nread;
if (worker.hasError()) {
res = false;
}
}
double timeTakenSec = (endtime - starttime) / 1000.0;
long totalReadKB = totalRead / 1024;
LOG.info("=== Report: " + nWorkers + " threads read " +
totalReadKB + " KB (across " +
nFiles + " file(s)) in " +
timeTakenSec + "s; average " +
totalReadKB / timeTakenSec + " KB/s");
return res;
}
/**
* Runs a standard workload using a helper class which provides the read
* implementation to use.
*/
public void runTestWorkload(ReadWorkerHelper helper) throws IOException {
if (!runParallelRead(1, 4, helper)) {
fail("Check log for errors");
}
if (!runParallelRead(1, 16, helper)) {
fail("Check log for errors");
}
if (!runParallelRead(2, 4, helper)) {
fail("Check log for errors");
}
}
public static void teardownCluster() throws Exception {
util.shutdown();
}
/**
* Do parallel read several times with different number of files and threads.
*
* Note that while this is the only "test" in a junit sense, we're actually
* dispatching a lot more. Failures in the other methods (and other threads)
* need to be manually collected, which is inconvenient.
*/
@Test
public void testParallelReadCopying() throws IOException {
runTestWorkload(new CopyingReadWorkerHelper());
}
@Test
public void testParallelReadByteBuffer() throws IOException {
runTestWorkload(new DirectReadWorkerHelper());
}
@Test
public void testParallelReadMixed() throws IOException {
runTestWorkload(new MixedWorkloadHelper());
}
@Test
public void testParallelNoChecksums() throws IOException {
verifyChecksums = false;
runTestWorkload(new MixedWorkloadHelper());
}
}
| 12,923 | 29.481132 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.hamcrest.CoreMatchers.equalTo;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.apache.hadoop.util.Time;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestBlockReaderLocal {
private static TemporarySocketDirectory sockDir;
@BeforeClass
public static void init() {
sockDir = new TemporarySocketDirectory();
DomainSocket.disableBindPathValidation();
}
@AfterClass
public static void shutdown() throws IOException {
sockDir.close();
}
public static void assertArrayRegionsEqual(byte []buf1, int off1, byte []buf2,
int off2, int len) {
for (int i = 0; i < len; i++) {
if (buf1[off1 + i] != buf2[off2 + i]) {
Assert.fail("arrays differ at byte " + i + ". " +
"The first array has " + (int)buf1[off1 + i] +
", but the second array has " + (int)buf2[off2 + i]);
}
}
}
/**
* Similar to IOUtils#readFully(). Reads bytes in a loop.
*
* @param reader The BlockReaderLocal to read bytes from
* @param buf The ByteBuffer to read into
* @param off The offset in the buffer to read into
* @param len The number of bytes to read.
*
* @throws IOException If it could not read the requested number of bytes
*/
private static void readFully(BlockReaderLocal reader,
ByteBuffer buf, int off, int len) throws IOException {
int amt = len;
while (amt > 0) {
buf.limit(off + len);
buf.position(off);
long ret = reader.read(buf);
if (ret < 0) {
throw new EOFException( "Premature EOF from BlockReaderLocal " +
"after reading " + (len - amt) + " byte(s).");
}
amt -= ret;
off += ret;
}
}
private static class BlockReaderLocalTest {
final static int TEST_LENGTH = 12345;
final static int BYTES_PER_CHECKSUM = 512;
public void setConfiguration(HdfsConfiguration conf) {
// default: no-op
}
public void setup(File blockFile, boolean usingChecksums)
throws IOException {
// default: no-op
}
public void doTest(BlockReaderLocal reader, byte original[])
throws IOException {
// default: no-op
}
}
public void runBlockReaderLocalTest(BlockReaderLocalTest test,
boolean checksum, long readahead) throws IOException {
Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
MiniDFSCluster cluster = null;
HdfsConfiguration conf = new HdfsConfiguration();
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
!checksum);
conf.setLong(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
BlockReaderLocalTest.BYTES_PER_CHECKSUM);
conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "CRC32C");
conf.setLong(DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD, readahead);
test.setConfiguration(conf);
FileInputStream dataIn = null, metaIn = null;
final Path TEST_PATH = new Path("/a");
final long RANDOM_SEED = 4567L;
BlockReaderLocal blockReaderLocal = null;
FSDataInputStream fsIn = null;
byte original[] = new byte[BlockReaderLocalTest.TEST_LENGTH];
FileSystem fs = null;
ShortCircuitShm shm = null;
RandomAccessFile raf = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH,
BlockReaderLocalTest.TEST_LENGTH, (short)1, RANDOM_SEED);
try {
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
} catch (InterruptedException e) {
Assert.fail("unexpected InterruptedException during " +
"waitReplication: " + e);
} catch (TimeoutException e) {
Assert.fail("unexpected TimeoutException during " +
"waitReplication: " + e);
}
fsIn = fs.open(TEST_PATH);
IOUtils.readFully(fsIn, original, 0,
BlockReaderLocalTest.TEST_LENGTH);
fsIn.close();
fsIn = null;
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
File dataFile = cluster.getBlockFile(0, block);
File metaFile = cluster.getBlockMetadataFile(0, block);
ShortCircuitCache shortCircuitCache =
ClientContext.getFromConf(conf).getShortCircuitCache();
cluster.shutdown();
cluster = null;
test.setup(dataFile, checksum);
FileInputStream streams[] = {
new FileInputStream(dataFile),
new FileInputStream(metaFile)
};
dataIn = streams[0];
metaIn = streams[1];
ExtendedBlockId key = new ExtendedBlockId(block.getBlockId(),
block.getBlockPoolId());
raf = new RandomAccessFile(
new File(sockDir.getDir().getAbsolutePath(),
UUID.randomUUID().toString()), "rw");
raf.setLength(8192);
FileInputStream shmStream = new FileInputStream(raf.getFD());
shm = new ShortCircuitShm(ShmId.createRandom(), shmStream);
ShortCircuitReplica replica =
new ShortCircuitReplica(key, dataIn, metaIn, shortCircuitCache,
Time.now(), shm.allocAndRegisterSlot(
ExtendedBlockId.fromExtendedBlock(block)));
blockReaderLocal = new BlockReaderLocal.Builder(
new DfsClientConf.ShortCircuitConf(conf)).
setFilename(TEST_PATH.getName()).
setBlock(block).
setShortCircuitReplica(replica).
setCachingStrategy(new CachingStrategy(false, readahead)).
setVerifyChecksum(checksum).
build();
dataIn = null;
metaIn = null;
test.doTest(blockReaderLocal, original);
// BlockReaderLocal should not alter the file position.
Assert.assertEquals(0, streams[0].getChannel().position());
Assert.assertEquals(0, streams[1].getChannel().position());
} finally {
if (fsIn != null) fsIn.close();
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
if (dataIn != null) dataIn.close();
if (metaIn != null) metaIn.close();
if (blockReaderLocal != null) blockReaderLocal.close();
if (shm != null) shm.free();
if (raf != null) raf.close();
}
}
private static class TestBlockReaderLocalImmediateClose
extends BlockReaderLocalTest {
}
@Test
public void testBlockReaderLocalImmediateClose() throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalImmediateClose(), true, 0);
runBlockReaderLocalTest(new TestBlockReaderLocalImmediateClose(), false, 0);
}
private static class TestBlockReaderSimpleReads
extends BlockReaderLocalTest {
@Override
public void doTest(BlockReaderLocal reader, byte original[])
throws IOException {
byte buf[] = new byte[TEST_LENGTH];
reader.readFully(buf, 0, 512);
assertArrayRegionsEqual(original, 0, buf, 0, 512);
reader.readFully(buf, 512, 512);
assertArrayRegionsEqual(original, 512, buf, 512, 512);
reader.readFully(buf, 1024, 513);
assertArrayRegionsEqual(original, 1024, buf, 1024, 513);
reader.readFully(buf, 1537, 514);
assertArrayRegionsEqual(original, 1537, buf, 1537, 514);
// Readahead is always at least the size of one chunk in this test.
Assert.assertTrue(reader.getMaxReadaheadLength() >=
BlockReaderLocalTest.BYTES_PER_CHECKSUM);
}
}
@Test
public void testBlockReaderSimpleReads() throws IOException {
runBlockReaderLocalTest(new TestBlockReaderSimpleReads(), true,
DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
}
@Test
public void testBlockReaderSimpleReadsShortReadahead() throws IOException {
runBlockReaderLocalTest(new TestBlockReaderSimpleReads(), true,
BlockReaderLocalTest.BYTES_PER_CHECKSUM - 1);
}
@Test
public void testBlockReaderSimpleReadsNoChecksum() throws IOException {
runBlockReaderLocalTest(new TestBlockReaderSimpleReads(), false,
DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
}
@Test
public void testBlockReaderSimpleReadsNoReadahead() throws IOException {
runBlockReaderLocalTest(new TestBlockReaderSimpleReads(), true, 0);
}
@Test
public void testBlockReaderSimpleReadsNoChecksumNoReadahead()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderSimpleReads(), false, 0);
}
private static class TestBlockReaderLocalArrayReads2
extends BlockReaderLocalTest {
@Override
public void doTest(BlockReaderLocal reader, byte original[])
throws IOException {
byte buf[] = new byte[TEST_LENGTH];
reader.readFully(buf, 0, 10);
assertArrayRegionsEqual(original, 0, buf, 0, 10);
reader.readFully(buf, 10, 100);
assertArrayRegionsEqual(original, 10, buf, 10, 100);
reader.readFully(buf, 110, 700);
assertArrayRegionsEqual(original, 110, buf, 110, 700);
reader.readFully(buf, 810, 1); // from offset 810 to offset 811
reader.readFully(buf, 811, 5);
assertArrayRegionsEqual(original, 811, buf, 811, 5);
reader.readFully(buf, 816, 900); // skip from offset 816 to offset 1716
reader.readFully(buf, 1716, 5);
assertArrayRegionsEqual(original, 1716, buf, 1716, 5);
}
}
@Test
public void testBlockReaderLocalArrayReads2() throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalArrayReads2(),
true, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
}
@Test
public void testBlockReaderLocalArrayReads2NoChecksum()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalArrayReads2(),
false, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
}
@Test
public void testBlockReaderLocalArrayReads2NoReadahead()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalArrayReads2(), true, 0);
}
@Test
public void testBlockReaderLocalArrayReads2NoChecksumNoReadahead()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalArrayReads2(), false, 0);
}
private static class TestBlockReaderLocalByteBufferReads
extends BlockReaderLocalTest {
@Override
public void doTest(BlockReaderLocal reader, byte original[])
throws IOException {
ByteBuffer buf = ByteBuffer.wrap(new byte[TEST_LENGTH]);
readFully(reader, buf, 0, 10);
assertArrayRegionsEqual(original, 0, buf.array(), 0, 10);
readFully(reader, buf, 10, 100);
assertArrayRegionsEqual(original, 10, buf.array(), 10, 100);
readFully(reader, buf, 110, 700);
assertArrayRegionsEqual(original, 110, buf.array(), 110, 700);
reader.skip(1); // skip from offset 810 to offset 811
readFully(reader, buf, 811, 5);
assertArrayRegionsEqual(original, 811, buf.array(), 811, 5);
}
}
@Test
public void testBlockReaderLocalByteBufferReads()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalByteBufferReads(),
true, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
}
@Test
public void testBlockReaderLocalByteBufferReadsNoChecksum()
throws IOException {
runBlockReaderLocalTest(
new TestBlockReaderLocalByteBufferReads(),
false, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
}
@Test
public void testBlockReaderLocalByteBufferReadsNoReadahead()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalByteBufferReads(),
true, 0);
}
@Test
public void testBlockReaderLocalByteBufferReadsNoChecksumNoReadahead()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalByteBufferReads(),
false, 0);
}
/**
* Test reads that bypass the bounce buffer (because they are aligned
* and bigger than the readahead).
*/
private static class TestBlockReaderLocalByteBufferFastLaneReads
extends BlockReaderLocalTest {
@Override
public void doTest(BlockReaderLocal reader, byte original[])
throws IOException {
ByteBuffer buf = ByteBuffer.allocateDirect(TEST_LENGTH);
readFully(reader, buf, 0, 5120);
buf.flip();
assertArrayRegionsEqual(original, 0,
DFSTestUtil.asArray(buf), 0,
5120);
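      // skip from offset 5120 to offset 6657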
reader.skip(1537);
readFully(reader, buf, 0, 1);
buf.flip();
assertArrayRegionsEqual(original, 6657,
DFSTestUtil.asArray(buf), 0,
1);
reader.forceAnchorable();
readFully(reader, buf, 0, 5120);
buf.flip();
assertArrayRegionsEqual(original, 6658,
DFSTestUtil.asArray(buf), 0,
5120);
reader.forceUnanchorable();
readFully(reader, buf, 0, 513);
buf.flip();
assertArrayRegionsEqual(original, 11778,
DFSTestUtil.asArray(buf), 0,
513);
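      // skip from offset 12291 to offset 12294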
reader.skip(3);
readFully(reader, buf, 0, 50);
buf.flip();
assertArrayRegionsEqual(original, 12294,
DFSTestUtil.asArray(buf), 0,
50);
}
}
@Test
public void testBlockReaderLocalByteBufferFastLaneReads()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalByteBufferFastLaneReads(),
true, 2 * BlockReaderLocalTest.BYTES_PER_CHECKSUM);
}
@Test
public void testBlockReaderLocalByteBufferFastLaneReadsNoChecksum()
throws IOException {
runBlockReaderLocalTest(
new TestBlockReaderLocalByteBufferFastLaneReads(),
false, 2 * BlockReaderLocalTest.BYTES_PER_CHECKSUM);
}
@Test
public void testBlockReaderLocalByteBufferFastLaneReadsNoReadahead()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalByteBufferFastLaneReads(),
true, 0);
}
@Test
public void testBlockReaderLocalByteBufferFastLaneReadsNoChecksumNoReadahead()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalByteBufferFastLaneReads(),
false, 0);
}
private static class TestBlockReaderLocalReadCorruptStart
extends BlockReaderLocalTest {
boolean usingChecksums = false;
@Override
public void setup(File blockFile, boolean usingChecksums)
throws IOException {
RandomAccessFile bf = null;
this.usingChecksums = usingChecksums;
try {
bf = new RandomAccessFile(blockFile, "rw");
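        // zero out the first bytes of the block file to corrupt its start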
bf.write(new byte[] {0,0,0,0,0,0,0,0,0,0,0,0,0,0});
} finally {
if (bf != null) bf.close();
}
}
public void doTest(BlockReaderLocal reader, byte original[])
throws IOException {
byte buf[] = new byte[TEST_LENGTH];
if (usingChecksums) {
try {
reader.readFully(buf, 0, 10);
Assert.fail("did not detect corruption");
} catch (IOException e) {
// expected
}
} else {
reader.readFully(buf, 0, 10);
}
}
}
@Test
public void testBlockReaderLocalReadCorruptStart()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalReadCorruptStart(), true,
DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
}
private static class TestBlockReaderLocalReadCorrupt
extends BlockReaderLocalTest {
boolean usingChecksums = false;
@Override
public void setup(File blockFile, boolean usingChecksums)
throws IOException {
RandomAccessFile bf = null;
this.usingChecksums = usingChecksums;
try {
bf = new RandomAccessFile(blockFile, "rw");
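        // corrupt bytes starting at offset 1539; the final readFully in
        // doTest (offsets 816-1715) covers this region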
bf.seek(1539);
bf.write(new byte[] {0,0,0,0,0,0,0,0,0,0,0,0,0,0});
} finally {
if (bf != null) bf.close();
}
}
public void doTest(BlockReaderLocal reader, byte original[])
throws IOException {
byte buf[] = new byte[TEST_LENGTH];
try {
reader.readFully(buf, 0, 10);
assertArrayRegionsEqual(original, 0, buf, 0, 10);
reader.readFully(buf, 10, 100);
assertArrayRegionsEqual(original, 10, buf, 10, 100);
reader.readFully(buf, 110, 700);
assertArrayRegionsEqual(original, 110, buf, 110, 700);
reader.skip(1); // skip from offset 810 to offset 811
reader.readFully(buf, 811, 5);
assertArrayRegionsEqual(original, 811, buf, 811, 5);
reader.readFully(buf, 816, 900);
if (usingChecksums) {
// We should detect the corruption when using a checksum file.
Assert.fail("did not detect corruption");
}
} catch (ChecksumException e) {
if (!usingChecksums) {
Assert.fail("didn't expect to get ChecksumException: not " +
"using checksums.");
}
}
}
}
@Test
public void testBlockReaderLocalReadCorrupt()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalReadCorrupt(), true,
DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
}
@Test
public void testBlockReaderLocalReadCorruptNoChecksum()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalReadCorrupt(), false,
DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
}
@Test
public void testBlockReaderLocalReadCorruptNoReadahead()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalReadCorrupt(), true, 0);
}
@Test
public void testBlockReaderLocalReadCorruptNoChecksumNoReadahead()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalReadCorrupt(), false, 0);
}
private static class TestBlockReaderLocalWithMlockChanges
extends BlockReaderLocalTest {
@Override
public void setup(File blockFile, boolean usingChecksums)
throws IOException {
}
@Override
public void doTest(BlockReaderLocal reader, byte original[])
throws IOException {
ByteBuffer buf = ByteBuffer.wrap(new byte[TEST_LENGTH]);
reader.skip(1);
readFully(reader, buf, 1, 9);
assertArrayRegionsEqual(original, 1, buf.array(), 1, 9);
readFully(reader, buf, 10, 100);
assertArrayRegionsEqual(original, 10, buf.array(), 10, 100);
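      // toggle the replica's anchorable flag mid-read, simulating the
      // DataNode mlocking (caching) and later un-caching the block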
reader.forceAnchorable();
readFully(reader, buf, 110, 700);
assertArrayRegionsEqual(original, 110, buf.array(), 110, 700);
reader.forceUnanchorable();
reader.skip(1); // skip from offset 810 to offset 811
readFully(reader, buf, 811, 5);
assertArrayRegionsEqual(original, 811, buf.array(), 811, 5);
}
}
@Test
public void testBlockReaderLocalWithMlockChanges()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalWithMlockChanges(),
true, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
}
@Test
public void testBlockReaderLocalWithMlockChangesNoChecksum()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalWithMlockChanges(),
false, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
}
@Test
public void testBlockReaderLocalWithMlockChangesNoReadahead()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalWithMlockChanges(),
true, 0);
}
@Test
public void testBlockReaderLocalWithMlockChangesNoChecksumNoReadahead()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalWithMlockChanges(),
false, 0);
}
private static class TestBlockReaderLocalOnFileWithoutChecksum
extends BlockReaderLocalTest {
@Override
public void setConfiguration(HdfsConfiguration conf) {
conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
}
@Override
public void doTest(BlockReaderLocal reader, byte original[])
throws IOException {
      Assert.assertFalse(reader.getVerifyChecksum());
ByteBuffer buf = ByteBuffer.wrap(new byte[TEST_LENGTH]);
reader.skip(1);
readFully(reader, buf, 1, 9);
assertArrayRegionsEqual(original, 1, buf.array(), 1, 9);
readFully(reader, buf, 10, 100);
assertArrayRegionsEqual(original, 10, buf.array(), 10, 100);
reader.forceAnchorable();
readFully(reader, buf, 110, 700);
assertArrayRegionsEqual(original, 110, buf.array(), 110, 700);
reader.forceUnanchorable();
reader.skip(1); // skip from offset 810 to offset 811
readFully(reader, buf, 811, 5);
assertArrayRegionsEqual(original, 811, buf.array(), 811, 5);
}
}
private static class TestBlockReaderLocalReadZeroBytes
extends BlockReaderLocalTest {
@Override
public void doTest(BlockReaderLocal reader, byte original[])
throws IOException {
byte emptyArr[] = new byte[0];
Assert.assertEquals(0, reader.read(emptyArr, 0, 0));
ByteBuffer emptyBuf = ByteBuffer.wrap(emptyArr);
Assert.assertEquals(0, reader.read(emptyBuf));
reader.skip(1);
Assert.assertEquals(0, reader.read(emptyArr, 0, 0));
Assert.assertEquals(0, reader.read(emptyBuf));
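      // skip to the end of the block; at EOF even zero-length reads
      // should return -1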
reader.skip(BlockReaderLocalTest.TEST_LENGTH - 1);
Assert.assertEquals(-1, reader.read(emptyArr, 0, 0));
Assert.assertEquals(-1, reader.read(emptyBuf));
}
}
@Test
public void testBlockReaderLocalOnFileWithoutChecksum()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalOnFileWithoutChecksum(),
true, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
}
@Test
public void testBlockReaderLocalOnFileWithoutChecksumNoChecksum()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalOnFileWithoutChecksum(),
false, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
}
@Test
public void testBlockReaderLocalOnFileWithoutChecksumNoReadahead()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalOnFileWithoutChecksum(),
true, 0);
}
@Test
public void testBlockReaderLocalOnFileWithoutChecksumNoChecksumNoReadahead()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalOnFileWithoutChecksum(),
false, 0);
}
@Test
public void testBlockReaderLocalReadZeroBytes()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalReadZeroBytes(),
true, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
}
@Test
public void testBlockReaderLocalReadZeroBytesNoChecksum()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalReadZeroBytes(),
false, DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
}
@Test
public void testBlockReaderLocalReadZeroBytesNoReadahead()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalReadZeroBytes(),
true, 0);
}
@Test
public void testBlockReaderLocalReadZeroBytesNoChecksumNoReadahead()
throws IOException {
runBlockReaderLocalTest(new TestBlockReaderLocalReadZeroBytes(),
false, 0);
}
@Test(timeout=60000)
  public void testStatisticsForShortCircuitLocalRead() throws Exception {
testStatistics(true);
}
@Test(timeout=60000)
  public void testStatisticsForLocalRead() throws Exception {
testStatistics(false);
}
private void testStatistics(boolean isShortCircuit) throws Exception {
Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
HdfsConfiguration conf = new HdfsConfiguration();
TemporarySocketDirectory sockDir = null;
if (isShortCircuit) {
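      // fail the test if a read falls back to TCP, so every read must
      // take the short-circuit path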
DFSInputStream.tcpReadsDisabledForTesting = true;
sockDir = new TemporarySocketDirectory();
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
new File(sockDir.getDir(), "TestStatisticsForLocalRead.%d.sock").
getAbsolutePath());
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
DomainSocket.disableBindPathValidation();
} else {
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
}
MiniDFSCluster cluster = null;
final Path TEST_PATH = new Path("/a");
final long RANDOM_SEED = 4567L;
FSDataInputStream fsIn = null;
byte original[] = new byte[BlockReaderLocalTest.TEST_LENGTH];
FileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH,
BlockReaderLocalTest.TEST_LENGTH, (short)1, RANDOM_SEED);
try {
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
} catch (InterruptedException e) {
Assert.fail("unexpected InterruptedException during " +
"waitReplication: " + e);
} catch (TimeoutException e) {
Assert.fail("unexpected TimeoutException during " +
"waitReplication: " + e);
}
fsIn = fs.open(TEST_PATH);
IOUtils.readFully(fsIn, original, 0,
BlockReaderLocalTest.TEST_LENGTH);
HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
dfsIn.getReadStatistics().getTotalBytesRead());
Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
dfsIn.getReadStatistics().getTotalLocalBytesRead());
if (isShortCircuit) {
Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
} else {
Assert.assertEquals(0,
dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
}
fsIn.close();
fsIn = null;
} finally {
DFSInputStream.tcpReadsDisabledForTesting = false;
if (fsIn != null) fsIn.close();
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
if (sockDir != null) sockDir.close();
}
}
}
| 27,718 | 34.674389 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
import org.apache.hadoop.hdfs.security.token.block.BlockKey;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.junit.Assert;
import org.junit.Test;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
/**
* Tests for {@link PBHelper}
*/
public class TestPBHelper {
/**
* Used for asserting equality on doubles.
*/
private static final double DELTA = 0.000001;
@Test
public void testConvertNamenodeRole() {
assertEquals(NamenodeRoleProto.BACKUP,
PBHelper.convert(NamenodeRole.BACKUP));
assertEquals(NamenodeRoleProto.CHECKPOINT,
PBHelper.convert(NamenodeRole.CHECKPOINT));
assertEquals(NamenodeRoleProto.NAMENODE,
PBHelper.convert(NamenodeRole.NAMENODE));
assertEquals(NamenodeRole.BACKUP,
PBHelper.convert(NamenodeRoleProto.BACKUP));
assertEquals(NamenodeRole.CHECKPOINT,
PBHelper.convert(NamenodeRoleProto.CHECKPOINT));
assertEquals(NamenodeRole.NAMENODE,
PBHelper.convert(NamenodeRoleProto.NAMENODE));
}
private static StorageInfo getStorageInfo(NodeType type) {
return new StorageInfo(1, 2, "cid", 3, type);
}
@Test
  public void testConvertStorageInfo() {
StorageInfo info = getStorageInfo(NodeType.NAME_NODE);
StorageInfoProto infoProto = PBHelper.convert(info);
StorageInfo info2 = PBHelper.convert(infoProto, NodeType.NAME_NODE);
assertEquals(info.getClusterID(), info2.getClusterID());
assertEquals(info.getCTime(), info2.getCTime());
assertEquals(info.getLayoutVersion(), info2.getLayoutVersion());
assertEquals(info.getNamespaceID(), info2.getNamespaceID());
}
@Test
public void testConvertNamenodeRegistration() {
StorageInfo info = getStorageInfo(NodeType.NAME_NODE);
NamenodeRegistration reg = new NamenodeRegistration("address:999",
"http:1000", info, NamenodeRole.NAMENODE);
NamenodeRegistrationProto regProto = PBHelper.convert(reg);
NamenodeRegistration reg2 = PBHelper.convert(regProto);
assertEquals(reg.getAddress(), reg2.getAddress());
assertEquals(reg.getClusterID(), reg2.getClusterID());
assertEquals(reg.getCTime(), reg2.getCTime());
assertEquals(reg.getHttpAddress(), reg2.getHttpAddress());
assertEquals(reg.getLayoutVersion(), reg2.getLayoutVersion());
assertEquals(reg.getNamespaceID(), reg2.getNamespaceID());
assertEquals(reg.getRegistrationID(), reg2.getRegistrationID());
assertEquals(reg.getRole(), reg2.getRole());
assertEquals(reg.getVersion(), reg2.getVersion());
}
@Test
public void testConvertDatanodeID() {
DatanodeID dn = DFSTestUtil.getLocalDatanodeID();
DatanodeIDProto dnProto = PBHelper.convert(dn);
DatanodeID dn2 = PBHelper.convert(dnProto);
compare(dn, dn2);
}
void compare(DatanodeID dn, DatanodeID dn2) {
assertEquals(dn.getIpAddr(), dn2.getIpAddr());
assertEquals(dn.getHostName(), dn2.getHostName());
assertEquals(dn.getDatanodeUuid(), dn2.getDatanodeUuid());
assertEquals(dn.getXferPort(), dn2.getXferPort());
assertEquals(dn.getInfoPort(), dn2.getInfoPort());
assertEquals(dn.getIpcPort(), dn2.getIpcPort());
}
void compare(DatanodeStorage dns1, DatanodeStorage dns2) {
assertThat(dns2.getStorageID(), is(dns1.getStorageID()));
assertThat(dns2.getState(), is(dns1.getState()));
assertThat(dns2.getStorageType(), is(dns1.getStorageType()));
}
@Test
public void testConvertBlock() {
Block b = new Block(1, 100, 3);
BlockProto bProto = PBHelper.convert(b);
Block b2 = PBHelper.convert(bProto);
assertEquals(b, b2);
}
private static BlockWithLocations getBlockWithLocations(int bid) {
final String[] datanodeUuids = {"dn1", "dn2", "dn3"};
final String[] storageIDs = {"s1", "s2", "s3"};
final StorageType[] storageTypes = {
StorageType.DISK, StorageType.DISK, StorageType.DISK};
return new BlockWithLocations(new Block(bid, 0, 1),
datanodeUuids, storageIDs, storageTypes);
}
private void compare(BlockWithLocations locs1, BlockWithLocations locs2) {
assertEquals(locs1.getBlock(), locs2.getBlock());
assertTrue(Arrays.equals(locs1.getStorageIDs(), locs2.getStorageIDs()));
}
@Test
public void testConvertBlockWithLocations() {
BlockWithLocations locs = getBlockWithLocations(1);
BlockWithLocationsProto locsProto = PBHelper.convert(locs);
BlockWithLocations locs2 = PBHelper.convert(locsProto);
compare(locs, locs2);
}
@Test
public void testConvertBlocksWithLocations() {
BlockWithLocations[] list = new BlockWithLocations[] {
getBlockWithLocations(1), getBlockWithLocations(2) };
BlocksWithLocations locs = new BlocksWithLocations(list);
BlocksWithLocationsProto locsProto = PBHelper.convert(locs);
BlocksWithLocations locs2 = PBHelper.convert(locsProto);
BlockWithLocations[] blocks = locs.getBlocks();
BlockWithLocations[] blocks2 = locs2.getBlocks();
assertEquals(blocks.length, blocks2.length);
for (int i = 0; i < blocks.length; i++) {
compare(blocks[i], blocks2[i]);
}
}
private static BlockKey getBlockKey(int keyId) {
return new BlockKey(keyId, 10, "encodedKey".getBytes());
}
private void compare(BlockKey k1, BlockKey k2) {
assertEquals(k1.getExpiryDate(), k2.getExpiryDate());
assertEquals(k1.getKeyId(), k2.getKeyId());
assertTrue(Arrays.equals(k1.getEncodedKey(), k2.getEncodedKey()));
}
@Test
public void testConvertBlockKey() {
BlockKey key = getBlockKey(1);
BlockKeyProto keyProto = PBHelper.convert(key);
BlockKey key1 = PBHelper.convert(keyProto);
compare(key, key1);
}
@Test
public void testConvertExportedBlockKeys() {
BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
getBlockKey(1), keys);
ExportedBlockKeysProto expKeysProto = PBHelper.convert(expKeys);
ExportedBlockKeys expKeys1 = PBHelper.convert(expKeysProto);
compare(expKeys, expKeys1);
}
void compare(ExportedBlockKeys expKeys, ExportedBlockKeys expKeys1) {
BlockKey[] allKeys = expKeys.getAllKeys();
BlockKey[] allKeys1 = expKeys1.getAllKeys();
assertEquals(allKeys.length, allKeys1.length);
for (int i = 0; i < allKeys.length; i++) {
compare(allKeys[i], allKeys1[i]);
}
compare(expKeys.getCurrentKey(), expKeys1.getCurrentKey());
assertEquals(expKeys.getKeyUpdateInterval(),
expKeys1.getKeyUpdateInterval());
assertEquals(expKeys.getTokenLifetime(), expKeys1.getTokenLifetime());
}
@Test
public void testConvertCheckpointSignature() {
CheckpointSignature s = new CheckpointSignature(
getStorageInfo(NodeType.NAME_NODE), "bpid", 100, 1);
CheckpointSignatureProto sProto = PBHelper.convert(s);
CheckpointSignature s1 = PBHelper.convert(sProto);
assertEquals(s.getBlockpoolID(), s1.getBlockpoolID());
assertEquals(s.getClusterID(), s1.getClusterID());
assertEquals(s.getCTime(), s1.getCTime());
assertEquals(s.getCurSegmentTxId(), s1.getCurSegmentTxId());
assertEquals(s.getLayoutVersion(), s1.getLayoutVersion());
assertEquals(s.getMostRecentCheckpointTxId(),
s1.getMostRecentCheckpointTxId());
assertEquals(s.getNamespaceID(), s1.getNamespaceID());
}
private static void compare(RemoteEditLog l1, RemoteEditLog l2) {
assertEquals(l1.getEndTxId(), l2.getEndTxId());
assertEquals(l1.getStartTxId(), l2.getStartTxId());
}
@Test
public void testConvertRemoteEditLog() {
RemoteEditLog l = new RemoteEditLog(1, 100);
RemoteEditLogProto lProto = PBHelper.convert(l);
RemoteEditLog l1 = PBHelper.convert(lProto);
compare(l, l1);
}
@Test
public void testConvertRemoteEditLogManifest() {
List<RemoteEditLog> logs = new ArrayList<RemoteEditLog>();
logs.add(new RemoteEditLog(1, 10));
logs.add(new RemoteEditLog(11, 20));
RemoteEditLogManifest m = new RemoteEditLogManifest(logs);
RemoteEditLogManifestProto mProto = PBHelper.convert(m);
RemoteEditLogManifest m1 = PBHelper.convert(mProto);
List<RemoteEditLog> logs1 = m1.getLogs();
assertEquals(logs.size(), logs1.size());
for (int i = 0; i < logs.size(); i++) {
compare(logs.get(i), logs1.get(i));
}
}
public ExtendedBlock getExtendedBlock() {
return getExtendedBlock(1);
}
public ExtendedBlock getExtendedBlock(long blkid) {
return new ExtendedBlock("bpid", blkid, 100, 2);
}
private void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
assertEquals(dn1.getAdminState(), dn2.getAdminState());
assertEquals(dn1.getBlockPoolUsed(), dn2.getBlockPoolUsed());
assertEquals(dn1.getBlockPoolUsedPercent(),
dn2.getBlockPoolUsedPercent(), DELTA);
assertEquals(dn1.getCapacity(), dn2.getCapacity());
assertEquals(dn1.getDatanodeReport(), dn2.getDatanodeReport());
    assertEquals(dn1.getDfsUsed(), dn2.getDfsUsed());
    assertEquals(dn1.getDfsUsedPercent(), dn2.getDfsUsedPercent(), DELTA);
assertEquals(dn1.getIpAddr(), dn2.getIpAddr());
assertEquals(dn1.getHostName(), dn2.getHostName());
assertEquals(dn1.getInfoPort(), dn2.getInfoPort());
assertEquals(dn1.getIpcPort(), dn2.getIpcPort());
assertEquals(dn1.getLastUpdate(), dn2.getLastUpdate());
assertEquals(dn1.getLevel(), dn2.getLevel());
assertEquals(dn1.getNetworkLocation(), dn2.getNetworkLocation());
}
@Test
public void testConvertExtendedBlock() {
ExtendedBlock b = getExtendedBlock();
ExtendedBlockProto bProto = PBHelper.convert(b);
ExtendedBlock b1 = PBHelper.convert(bProto);
assertEquals(b, b1);
b.setBlockId(-1);
bProto = PBHelper.convert(b);
b1 = PBHelper.convert(bProto);
assertEquals(b, b1);
}
@Test
public void testConvertRecoveringBlock() {
DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
RecoveringBlock b = new RecoveringBlock(getExtendedBlock(), dnInfo, 3);
RecoveringBlockProto bProto = PBHelper.convert(b);
RecoveringBlock b1 = PBHelper.convert(bProto);
assertEquals(b.getBlock(), b1.getBlock());
DatanodeInfo[] dnInfo1 = b1.getLocations();
assertEquals(dnInfo.length, dnInfo1.length);
for (int i=0; i < dnInfo.length; i++) {
      compare(dnInfo[i], dnInfo1[i]);
}
}
@Test
public void testConvertBlockRecoveryCommand() {
DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
List<RecoveringBlock> blks = ImmutableList.of(
new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
new RecoveringBlock(getExtendedBlock(2), dnInfo, 3)
);
BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);
BlockRecoveryCommandProto proto = PBHelper.convert(cmd);
assertEquals(1, proto.getBlocks(0).getBlock().getB().getBlockId());
assertEquals(2, proto.getBlocks(1).getBlock().getB().getBlockId());
BlockRecoveryCommand cmd2 = PBHelper.convert(proto);
List<RecoveringBlock> cmd2Blks = Lists.newArrayList(
cmd2.getRecoveringBlocks());
assertEquals(blks.get(0).getBlock(), cmd2Blks.get(0).getBlock());
assertEquals(blks.get(1).getBlock(), cmd2Blks.get(1).getBlock());
assertEquals(Joiner.on(",").join(blks), Joiner.on(",").join(cmd2Blks));
assertEquals(cmd.toString(), cmd2.toString());
}
@Test
public void testConvertText() {
Text t = new Text("abc".getBytes());
String s = t.toString();
Text t1 = new Text(s);
assertEquals(t, t1);
}
@Test
public void testConvertBlockToken() {
Token<BlockTokenIdentifier> token = new Token<BlockTokenIdentifier>(
"identifier".getBytes(), "password".getBytes(), new Text("kind"),
new Text("service"));
TokenProto tokenProto = PBHelper.convert(token);
Token<BlockTokenIdentifier> token2 = PBHelper.convert(tokenProto);
compare(token, token2);
}
@Test
public void testConvertNamespaceInfo() {
NamespaceInfo info = new NamespaceInfo(37, "clusterID", "bpID", 2300);
NamespaceInfoProto proto = PBHelper.convert(info);
NamespaceInfo info2 = PBHelper.convert(proto);
compare(info, info2); //Compare the StorageInfo
assertEquals(info.getBlockPoolID(), info2.getBlockPoolID());
assertEquals(info.getBuildVersion(), info2.getBuildVersion());
}
private void compare(StorageInfo expected, StorageInfo actual) {
assertEquals(expected.clusterID, actual.clusterID);
assertEquals(expected.namespaceID, actual.namespaceID);
assertEquals(expected.cTime, actual.cTime);
assertEquals(expected.layoutVersion, actual.layoutVersion);
}
private void compare(Token<BlockTokenIdentifier> expected,
Token<BlockTokenIdentifier> actual) {
assertTrue(Arrays.equals(expected.getIdentifier(), actual.getIdentifier()));
assertTrue(Arrays.equals(expected.getPassword(), actual.getPassword()));
assertEquals(expected.getKind(), actual.getKind());
assertEquals(expected.getService(), actual.getService());
}
private void compare(LocatedBlock expected, LocatedBlock actual) {
assertEquals(expected.getBlock(), actual.getBlock());
compare(expected.getBlockToken(), actual.getBlockToken());
assertEquals(expected.getStartOffset(), actual.getStartOffset());
assertEquals(expected.isCorrupt(), actual.isCorrupt());
DatanodeInfo [] ei = expected.getLocations();
DatanodeInfo [] ai = actual.getLocations();
assertEquals(ei.length, ai.length);
for (int i = 0; i < ei.length ; i++) {
compare(ei[i], ai[i]);
}
}
private LocatedBlock createLocatedBlock() {
DatanodeInfo[] dnInfos = {
DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
AdminStates.DECOMMISSION_INPROGRESS),
DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
AdminStates.DECOMMISSIONED),
DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
AdminStates.NORMAL),
DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
AdminStates.NORMAL),
};
String[] storageIDs = {"s1", "s2", "s3", "s4"};
StorageType[] media = {
StorageType.DISK,
StorageType.SSD,
StorageType.DISK,
StorageType.RAM_DISK
};
LocatedBlock lb = new LocatedBlock(
new ExtendedBlock("bp12", 12345, 10, 53),
dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
lb.setBlockToken(new Token<BlockTokenIdentifier>(
"identifier".getBytes(), "password".getBytes(), new Text("kind"),
new Text("service")));
return lb;
}
private LocatedBlock createLocatedBlockNoStorageMedia() {
DatanodeInfo[] dnInfos = {
DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
AdminStates.DECOMMISSION_INPROGRESS),
DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
AdminStates.DECOMMISSIONED),
DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
AdminStates.NORMAL)
};
LocatedBlock lb = new LocatedBlock(
new ExtendedBlock("bp12", 12345, 10, 53), dnInfos);
lb.setBlockToken(new Token<BlockTokenIdentifier>(
"identifier".getBytes(), "password".getBytes(), new Text("kind"),
new Text("service")));
lb.setStartOffset(5);
return lb;
}
@Test
public void testConvertLocatedBlock() {
LocatedBlock lb = createLocatedBlock();
LocatedBlockProto lbProto = PBHelper.convert(lb);
LocatedBlock lb2 = PBHelper.convert(lbProto);
compare(lb,lb2);
}
@Test
public void testConvertLocatedBlockNoStorageMedia() {
LocatedBlock lb = createLocatedBlockNoStorageMedia();
LocatedBlockProto lbProto = PBHelper.convert(lb);
LocatedBlock lb2 = PBHelper.convert(lbProto);
compare(lb,lb2);
}
@Test
public void testConvertLocatedBlockList() {
ArrayList<LocatedBlock> lbl = new ArrayList<LocatedBlock>();
for (int i=0;i<3;i++) {
lbl.add(createLocatedBlock());
}
List<LocatedBlockProto> lbpl = PBHelper.convertLocatedBlock2(lbl);
List<LocatedBlock> lbl2 = PBHelper.convertLocatedBlock(lbpl);
assertEquals(lbl.size(), lbl2.size());
for (int i=0;i<lbl.size();i++) {
      compare(lbl.get(i), lbl2.get(i));
}
}
@Test
public void testConvertLocatedBlockArray() {
LocatedBlock [] lbl = new LocatedBlock[3];
for (int i=0;i<3;i++) {
lbl[i] = createLocatedBlock();
}
LocatedBlockProto [] lbpl = PBHelper.convertLocatedBlock(lbl);
LocatedBlock [] lbl2 = PBHelper.convertLocatedBlock(lbpl);
assertEquals(lbl.length, lbl2.length);
for (int i=0;i<lbl.length;i++) {
compare(lbl[i], lbl2[i]);
}
}
@Test
public void testConvertDatanodeRegistration() {
DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
getBlockKey(1), keys);
DatanodeRegistration reg = new DatanodeRegistration(dnId,
new StorageInfo(NodeType.DATA_NODE), expKeys, "3.0.0");
DatanodeRegistrationProto proto = PBHelper.convert(reg);
DatanodeRegistration reg2 = PBHelper.convert(proto);
compare(reg.getStorageInfo(), reg2.getStorageInfo());
compare(reg.getExportedKeys(), reg2.getExportedKeys());
compare(reg, reg2);
assertEquals(reg.getSoftwareVersion(), reg2.getSoftwareVersion());
}
@Test
  public void testConvertDatanodeStorage() {
DatanodeStorage dns1 = new DatanodeStorage(
"id1", DatanodeStorage.State.NORMAL, StorageType.SSD);
DatanodeStorageProto proto = PBHelper.convert(dns1);
DatanodeStorage dns2 = PBHelper.convert(proto);
compare(dns1, dns2);
}
@Test
public void testConvertBlockCommand() {
Block[] blocks = new Block[] { new Block(21), new Block(22) };
DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1],
new DatanodeInfo[2] };
dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
String[][] storageIDs = {{"s00"}, {"s10", "s11"}};
StorageType[][] storageTypes = {{StorageType.DEFAULT},
{StorageType.DEFAULT, StorageType.DEFAULT}};
BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
blocks, dnInfos, storageTypes, storageIDs);
BlockCommandProto bcProto = PBHelper.convert(bc);
BlockCommand bc2 = PBHelper.convert(bcProto);
assertEquals(bc.getAction(), bc2.getAction());
assertEquals(bc.getBlocks().length, bc2.getBlocks().length);
Block[] blocks2 = bc2.getBlocks();
for (int i = 0; i < blocks.length; i++) {
assertEquals(blocks[i], blocks2[i]);
}
DatanodeInfo[][] dnInfos2 = bc2.getTargets();
assertEquals(dnInfos.length, dnInfos2.length);
for (int i = 0; i < dnInfos.length; i++) {
DatanodeInfo[] d1 = dnInfos[i];
DatanodeInfo[] d2 = dnInfos2[i];
assertEquals(d1.length, d2.length);
for (int j = 0; j < d1.length; j++) {
compare(d1[j], d2[j]);
}
}
}
@Test
public void testChecksumTypeProto() {
assertEquals(DataChecksum.Type.NULL,
PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL));
assertEquals(DataChecksum.Type.CRC32,
PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32));
assertEquals(DataChecksum.Type.CRC32C,
PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C));
assertEquals(PBHelper.convert(DataChecksum.Type.NULL),
HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL);
assertEquals(PBHelper.convert(DataChecksum.Type.CRC32),
HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32);
assertEquals(PBHelper.convert(DataChecksum.Type.CRC32C),
HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C);
}
@Test
public void testAclEntryProto() {
// All fields populated.
AclEntry e1 = new AclEntry.Builder().setName("test")
.setPermission(FsAction.READ_EXECUTE).setScope(AclEntryScope.DEFAULT)
.setType(AclEntryType.OTHER).build();
// No name.
AclEntry e2 = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER).setPermission(FsAction.ALL).build();
// No permission, which will default to the 0'th enum element.
AclEntry e3 = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER).setName("test").build();
AclEntry[] expected = new AclEntry[] { e1, e2,
new AclEntry.Builder()
.setScope(e3.getScope())
.setType(e3.getType())
.setName(e3.getName())
.setPermission(FsAction.NONE)
.build() };
AclEntry[] actual = Lists.newArrayList(
PBHelper.convertAclEntry(PBHelper.convertAclEntryProto(Lists
.newArrayList(e1, e2, e3)))).toArray(new AclEntry[0]);
Assert.assertArrayEquals(expected, actual);
}
@Test
public void testAclStatusProto() {
AclEntry e = new AclEntry.Builder().setName("test")
.setPermission(FsAction.READ_EXECUTE).setScope(AclEntryScope.DEFAULT)
.setType(AclEntryType.OTHER).build();
AclStatus s = new AclStatus.Builder().owner("foo").group("bar").addEntry(e)
.build();
Assert.assertEquals(s, PBHelper.convert(PBHelper.convert(s)));
}
}
| 26,541 | 40.278383 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDebugAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.io.IOUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.PrintStream;
import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestDebugAdmin {
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private DebugAdmin admin;
private DataNode datanode;
@Before
public void setUp() throws Exception {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
admin = new DebugAdmin(conf);
datanode = cluster.getDataNodes().get(0);
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
private String runCmd(String[] cmd) throws Exception {
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(bytes);
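    // redirect stdout and stderr into one buffer so the combined output
    // can be returned and asserted on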
final PrintStream oldErr = System.err;
final PrintStream oldOut = System.out;
System.setErr(out);
System.setOut(out);
int ret;
try {
ret = admin.run(cmd);
} finally {
System.setErr(oldErr);
System.setOut(oldOut);
IOUtils.closeStream(out);
}
return "ret: " + ret + ", " +
bytes.toString().replaceAll(System.getProperty("line.separator"), "");
}
@Test(timeout = 60000)
public void testRecoverLease() throws Exception {
assertEquals("ret: 1, You must supply a -path argument to recoverLease.",
runCmd(new String[]{"recoverLease", "-retries", "1"}));
FSDataOutputStream out = fs.create(new Path("/foo"));
out.write(123);
out.close();
assertEquals("ret: 0, recoverLease SUCCEEDED on /foo",
runCmd(new String[]{"recoverLease", "-path", "/foo"}));
}
@Test(timeout = 60000)
public void testVerifyBlockChecksumCommand() throws Exception {
DFSTestUtil.createFile(fs, new Path("/bar"), 1234, (short) 1, 0xdeadbeef);
FsDatasetSpi<?> fsd = datanode.getFSDataset();
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path("/bar"));
File blockFile = getBlockFile(fsd,
block.getBlockPoolId(), block.getLocalBlock());
assertEquals("ret: 1, You must specify a meta file with -meta",
runCmd(new String[]{"verify", "-block", blockFile.getAbsolutePath()}));
File metaFile = getMetaFile(fsd,
block.getBlockPoolId(), block.getLocalBlock());
assertEquals("ret: 0, Checksum type: " +
"DataChecksum(type=CRC32C, chunkSize=512)",
runCmd(new String[]{"verify",
"-meta", metaFile.getAbsolutePath()}));
assertEquals("ret: 0, Checksum type: " +
"DataChecksum(type=CRC32C, chunkSize=512)" +
"Checksum verification succeeded on block file " +
blockFile.getAbsolutePath(),
runCmd(new String[]{"verify",
"-meta", metaFile.getAbsolutePath(),
"-block", blockFile.getAbsolutePath()})
);
}
@Test(timeout = 60000)
public void testRecoverLeaseforFileNotFound() throws Exception {
assertTrue(runCmd(new String[] {
"recoverLease", "-path", "/foo", "-retries", "2" }).contains(
"Giving up on recoverLease for /foo after 1 try"));
}
}
| 4,754 | 36.148438 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.HAServiceStatus;
import org.apache.hadoop.ha.HAServiceTarget;
import org.apache.hadoop.ha.HealthCheckFailedException;
import org.apache.hadoop.ha.ZKFCProtocol;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.test.MockitoUtil;
import org.apache.hadoop.util.Shell;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
public class TestDFSHAAdmin {
private static final Log LOG = LogFactory.getLog(TestDFSHAAdmin.class);
private DFSHAAdmin tool;
private final ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
private final ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
private String errOutput;
private String output;
private HAServiceProtocol mockProtocol;
private ZKFCProtocol mockZkfcProtocol;
private static final String NSID = "ns1";
private static final HAServiceStatus STANDBY_READY_RESULT =
new HAServiceStatus(HAServiceState.STANDBY)
.setReadyToBecomeActive();
private final ArgumentCaptor<StateChangeRequestInfo> reqInfoCaptor =
ArgumentCaptor.forClass(StateChangeRequestInfo.class);
private static final String HOST_A = "1.2.3.1";
private static final String HOST_B = "1.2.3.2";
// Fencer shell commands that always return true and false respectively
// on Unix.
private static final String FENCER_TRUE_COMMAND_UNIX = "shell(true)";
private static final String FENCER_FALSE_COMMAND_UNIX = "shell(false)";
// Fencer shell commands that always return true and false respectively
// on Windows. Lacking POSIX 'true' and 'false' commands we use the DOS
// commands 'rem' and 'help.exe'.
private static final String FENCER_TRUE_COMMAND_WINDOWS = "shell(rem)";
private static final String FENCER_FALSE_COMMAND_WINDOWS = "shell(help.exe /? >NUL)";
private HdfsConfiguration getHAConf() {
HdfsConfiguration conf = new HdfsConfiguration();
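    // minimal HA config: nameservice "ns1" with two NameNodes, nn1 and nn2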
conf.set(DFSConfigKeys.DFS_NAMESERVICES, NSID);
conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, NSID);
conf.set(DFSUtil.addKeySuffixes(
DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, NSID), "nn1,nn2");
conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
conf.set(DFSUtil.addKeySuffixes(
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, NSID, "nn1"),
HOST_A + ":12345");
conf.set(DFSUtil.addKeySuffixes(
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, NSID, "nn2"),
HOST_B + ":12345");
return conf;
}
public static String getFencerTrueCommand() {
return Shell.WINDOWS ?
FENCER_TRUE_COMMAND_WINDOWS : FENCER_TRUE_COMMAND_UNIX;
}
public static String getFencerFalseCommand() {
return Shell.WINDOWS ?
FENCER_FALSE_COMMAND_WINDOWS : FENCER_FALSE_COMMAND_UNIX;
}
@Before
public void setup() throws IOException {
mockProtocol = MockitoUtil.mockProtocol(HAServiceProtocol.class);
mockZkfcProtocol = MockitoUtil.mockProtocol(ZKFCProtocol.class);
tool = new DFSHAAdmin() {
@Override
protected HAServiceTarget resolveTarget(String nnId) {
HAServiceTarget target = super.resolveTarget(nnId);
HAServiceTarget spy = Mockito.spy(target);
        // Override the target to return our mock protocol
try {
Mockito.doReturn(mockProtocol).when(spy).getProxy(
Mockito.<Configuration>any(), Mockito.anyInt());
Mockito.doReturn(mockZkfcProtocol).when(spy).getZKFCProxy(
Mockito.<Configuration>any(), Mockito.anyInt());
} catch (IOException e) {
throw new AssertionError(e); // mock setup doesn't really throw
}
return spy;
}
};
tool.setConf(getHAConf());
tool.setErrOut(new PrintStream(errOutBytes));
tool.setOut(new PrintStream(outBytes));
}
private void assertOutputContains(String string) {
if (!errOutput.contains(string) && !output.contains(string)) {
fail("Expected output to contain '" + string +
"' but err_output was:\n" + errOutput +
"\n and output was: \n" + output);
}
}
@Test
public void testNameserviceOption() throws Exception {
assertEquals(-1, runTool("-ns"));
assertOutputContains("Missing nameservice ID");
assertEquals(-1, runTool("-ns", "ns1"));
assertOutputContains("Missing command");
// "ns1" isn't defined but we check this lazily and help doesn't use the ns
assertEquals(0, runTool("-ns", "ns1", "-help", "transitionToActive"));
assertOutputContains("Transitions the service into Active");
}
@Test
public void testNamenodeResolution() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
assertEquals(0, runTool("-getServiceState", "nn1"));
Mockito.verify(mockProtocol).getServiceStatus();
assertEquals(-1, runTool("-getServiceState", "undefined"));
assertOutputContains(
"Unable to determine service address for namenode 'undefined'");
}
@Test
public void testHelp() throws Exception {
assertEquals(0, runTool("-help"));
assertEquals(0, runTool("-help", "transitionToActive"));
assertOutputContains("Transitions the service into Active");
}
@Test
public void testTransitionToActive() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
assertEquals(0, runTool("-transitionToActive", "nn1"));
Mockito.verify(mockProtocol).transitionToActive(
reqInfoCaptor.capture());
assertEquals(RequestSource.REQUEST_BY_USER,
reqInfoCaptor.getValue().getSource());
}
/**
* Test that, if automatic HA is enabled, none of the mutative operations
* will succeed, unless the -forcemanual flag is specified.
* @throws Exception
*/
@Test
public void testMutativeOperationsWithAutoHaEnabled() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
// Turn on auto-HA in the config
HdfsConfiguration conf = getHAConf();
conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf);
// Should fail without the forcemanual flag
assertEquals(-1, runTool("-transitionToActive", "nn1"));
assertTrue(errOutput.contains("Refusing to manually manage"));
assertEquals(-1, runTool("-transitionToStandby", "nn1"));
assertTrue(errOutput.contains("Refusing to manually manage"));
Mockito.verify(mockProtocol, Mockito.never())
.transitionToActive(anyReqInfo());
Mockito.verify(mockProtocol, Mockito.never())
.transitionToStandby(anyReqInfo());
// Force flag should bypass the check and change the request source
// for the RPC
setupConfirmationOnSystemIn();
assertEquals(0, runTool("-transitionToActive", "-forcemanual", "nn1"));
setupConfirmationOnSystemIn();
assertEquals(0, runTool("-transitionToStandby", "-forcemanual", "nn1"));
Mockito.verify(mockProtocol, Mockito.times(1)).transitionToActive(
reqInfoCaptor.capture());
Mockito.verify(mockProtocol, Mockito.times(1)).transitionToStandby(
reqInfoCaptor.capture());
// All of the RPCs should have had the "force" source
for (StateChangeRequestInfo ri : reqInfoCaptor.getAllValues()) {
assertEquals(RequestSource.REQUEST_BY_USER_FORCED, ri.getSource());
}
}
/**
* Setup System.in with a stream that feeds a "yes" answer on the
* next prompt.
*/
private static void setupConfirmationOnSystemIn() {
// Answer "yes" to the prompt about transition to active
System.setIn(new ByteArrayInputStream("yes\n".getBytes()));
}
/**
* Test that, even if automatic HA is enabled, the monitoring operations
* still function correctly.
*/
@Test
public void testMonitoringOperationsWithAutoHaEnabled() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
// Turn on auto-HA
HdfsConfiguration conf = getHAConf();
conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
tool.setConf(conf);
assertEquals(0, runTool("-checkHealth", "nn1"));
Mockito.verify(mockProtocol).monitorHealth();
assertEquals(0, runTool("-getServiceState", "nn1"));
Mockito.verify(mockProtocol).getServiceStatus();
}
@Test
public void testTransitionToStandby() throws Exception {
assertEquals(0, runTool("-transitionToStandby", "nn1"));
Mockito.verify(mockProtocol).transitionToStandby(anyReqInfo());
}
@Test
public void testFailoverWithNoFencerConfigured() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
assertEquals(-1, runTool("-failover", "nn1", "nn2"));
}
@Test
public void testFailoverWithFencerConfigured() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf = getHAConf();
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf);
assertEquals(0, runTool("-failover", "nn1", "nn2"));
}
@Test
public void testFailoverWithFencerAndNameservice() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf = getHAConf();
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf);
assertEquals(0, runTool("-ns", "ns1", "-failover", "nn1", "nn2"));
}
@Test
public void testFailoverWithFencerConfiguredAndForce() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf = getHAConf();
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf);
assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
}
@Test
public void testFailoverWithForceActive() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf = getHAConf();
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf);
assertEquals(0, runTool("-failover", "nn1", "nn2", "--forceactive"));
}
@Test
public void testFailoverWithInvalidFenceArg() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf = getHAConf();
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf);
assertEquals(-1, runTool("-failover", "nn1", "nn2", "notforcefence"));
}
@Test
public void testFailoverWithFenceButNoFencer() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
}
@Test
public void testFailoverWithFenceAndBadFencer() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf = getHAConf();
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "foobar!");
tool.setConf(conf);
assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
}
@Test
public void testFailoverWithAutoHa() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
// Turn on auto-HA in the config
HdfsConfiguration conf = getHAConf();
conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf);
assertEquals(0, runTool("-failover", "nn1", "nn2"));
Mockito.verify(mockZkfcProtocol).gracefulFailover();
}
@Test
public void testForceFenceOptionListedBeforeArgs() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf = getHAConf();
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf);
assertEquals(0, runTool("-failover", "--forcefence", "nn1", "nn2"));
}
@Test
public void testGetServiceStatus() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
assertEquals(0, runTool("-getServiceState", "nn1"));
Mockito.verify(mockProtocol).getServiceStatus();
}
@Test
public void testCheckHealth() throws Exception {
assertEquals(0, runTool("-checkHealth", "nn1"));
Mockito.verify(mockProtocol).monitorHealth();
Mockito.doThrow(new HealthCheckFailedException("fake health check failure"))
.when(mockProtocol).monitorHealth();
assertEquals(-1, runTool("-checkHealth", "nn1"));
assertOutputContains("Health check failed: fake health check failure");
}
/**
* Test that the fencing configuration can be overridden per-nameservice
* or per-namenode
*/
@Test
public void testFencingConfigPerNameNode() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
final String nsSpecificKey = DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY + "." + NSID;
final String nnSpecificKey = nsSpecificKey + ".nn1";
HdfsConfiguration conf = getHAConf();
// Set the default fencer to succeed
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
tool.setConf(conf);
assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
// Set the NN-specific fencer to fail. Should fail to fence.
conf.set(nnSpecificKey, getFencerFalseCommand());
tool.setConf(conf);
assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
conf.unset(nnSpecificKey);
// Set an NS-specific fencer to fail. Should fail.
conf.set(nsSpecificKey, getFencerFalseCommand());
tool.setConf(conf);
assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
// Set the NS-specific fencer to succeed. Should succeed
conf.set(nsSpecificKey, getFencerTrueCommand());
tool.setConf(conf);
assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
}
private Object runTool(String ... args) throws Exception {
errOutBytes.reset();
outBytes.reset();
LOG.info("Running: DFSHAAdmin " + Joiner.on(" ").join(args));
int ret = tool.run(args);
errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8);
output = new String(outBytes.toByteArray(), Charsets.UTF_8);
LOG.info("Err_output:\n" + errOutput + "\nOutput:\n" + output);
return ret;
}
private StateChangeRequestInfo anyReqInfo() {
return Mockito.any();
}
}
| 16,449 | 38.073634 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Test StoragePolicyAdmin commands
*/
public class TestStoragePolicyCommands {
private static final short REPL = 1;
private static final int SIZE = 128;
private static Configuration conf;
private static MiniDFSCluster cluster;
private static DistributedFileSystem fs;
@Before
public void clusterSetUp() throws IOException {
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).build();
cluster.waitActive();
fs = cluster.getFileSystem();
}
@After
  public void clusterShutdown() throws IOException {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
@Test
public void testSetAndGetStoragePolicy() throws Exception {
final Path foo = new Path("/foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(fs, bar, SIZE, REPL, 0);
final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo", 0,
"The storage policy of " + foo.toString() + " is unspecified");
DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo/bar", 0,
"The storage policy of " + bar.toString() + " is unspecified");
DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /foo -policy WARM", 0,
"Set storage policy WARM on " + foo.toString());
DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /foo/bar -policy COLD",
0, "Set storage policy COLD on " + bar.toString());
DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /fooz -policy WARM",
2, "File/Directory does not exist: /fooz");
final BlockStoragePolicySuite suite = BlockStoragePolicySuite
.createDefaultSuite();
final BlockStoragePolicy warm = suite.getPolicy("WARM");
final BlockStoragePolicy cold = suite.getPolicy("COLD");
DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo", 0,
"The storage policy of " + foo.toString() + ":\n" + warm);
DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo/bar", 0,
"The storage policy of " + bar.toString() + ":\n" + cold);
DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /fooz", 2,
"File/Directory does not exist: /fooz");
}
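  // For reference, the operations above correspond roughly to the following
  // command-line usage (a sketch, not exercised by this test):
  //   hdfs storagepolicies -setStoragePolicy -path /foo -policy WARM
  //   hdfs storagepolicies -getStoragePolicy -path /foo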
}
| 3,616 | 37.478723 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAAdmin;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.util.Shell;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import com.google.common.io.Files;
/**
* Tests for HAAdmin command with {@link MiniDFSCluster} set up in HA mode.
*/
public class TestDFSHAAdminMiniCluster {
static {
((Log4JLogger)LogFactory.getLog(HAAdmin.class)).getLogger().setLevel(
Level.ALL);
}
private static final Log LOG = LogFactory.getLog(TestDFSHAAdminMiniCluster.class);
private MiniDFSCluster cluster;
private Configuration conf;
private DFSHAAdmin tool;
private final ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
private String errOutput;
private int nn1Port;
@Before
public void setup() throws IOException {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
.build();
tool = new DFSHAAdmin();
tool.setConf(conf);
tool.setErrOut(new PrintStream(errOutBytes));
cluster.waitActive();
nn1Port = cluster.getNameNodePort(0);
}
@After
public void shutdown() throws Exception {
cluster.shutdown();
}
@Test
public void testGetServiceState() throws Exception {
assertEquals(0, runTool("-getServiceState", "nn1"));
assertEquals(0, runTool("-getServiceState", "nn2"));
cluster.transitionToActive(0);
assertEquals(0, runTool("-getServiceState", "nn1"));
NameNodeAdapter.enterSafeMode(cluster.getNameNode(0), false);
assertEquals(0, runTool("-getServiceState", "nn1"));
}
@Test
public void testStateTransition() throws Exception {
NameNode nnode1 = cluster.getNameNode(0);
assertTrue(nnode1.isStandbyState());
assertEquals(0, runTool("-transitionToActive", "nn1"));
assertFalse(nnode1.isStandbyState());
assertEquals(0, runTool("-transitionToStandby", "nn1"));
assertTrue(nnode1.isStandbyState());
NameNode nnode2 = cluster.getNameNode(1);
assertTrue(nnode2.isStandbyState());
assertEquals(0, runTool("-transitionToActive", "nn2"));
assertFalse(nnode2.isStandbyState());
assertEquals(0, runTool("-transitionToStandby", "nn2"));
assertTrue(nnode2.isStandbyState());
}
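  // The transitions above mirror the user-facing CLI (sketch):
  //   hdfs haadmin -transitionToActive nn1
  //   hdfs haadmin -transitionToStandby nn1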
@Test
public void testTryFailoverToSafeMode() throws Exception {
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
TestDFSHAAdmin.getFencerTrueCommand());
tool.setConf(conf);
NameNodeAdapter.enterSafeMode(cluster.getNameNode(0), false);
assertEquals(-1, runTool("-failover", "nn2", "nn1"));
assertTrue("Bad output: " + errOutput,
errOutput.contains("is not ready to become active: " +
"The NameNode is in safemode"));
}
/**
* Test failover with various options
*/
@Test
public void testFencer() throws Exception {
// Test failover with no fencer
assertEquals(-1, runTool("-failover", "nn1", "nn2"));
// Set up fencer to write info about the fencing target into a
// tmp file, so we can verify that the args were substituted right
File tmpFile = File.createTempFile("testFencer", ".txt");
tmpFile.deleteOnExit();
if (Shell.WINDOWS) {
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
"shell(echo %target_nameserviceid%.%target_namenodeid% " +
"%target_port% %dfs_ha_namenode_id% > " +
tmpFile.getAbsolutePath() + ")");
} else {
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
"shell(echo -n $target_nameserviceid.$target_namenodeid " +
"$target_port $dfs_ha_namenode_id > " +
tmpFile.getAbsolutePath() + ")");
}
// Test failover with fencer
tool.setConf(conf);
assertEquals(0, runTool("-transitionToActive", "nn1"));
assertEquals(0, runTool("-failover", "nn1", "nn2"));
// Test failover with fencer and nameservice
assertEquals(0, runTool("-ns", "minidfs-ns", "-failover", "nn2", "nn1"));
// Fencer has not run yet, since none of the above required fencing
assertEquals("", Files.toString(tmpFile, Charsets.UTF_8));
// Test failover with fencer and forcefence option
assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
// The fence script should run with the configuration from the target
// node, rather than the configuration from the fencing node. Strip
// out any trailing spaces and CR/LFs which may be present on Windows.
    String fenceCommandOutput = Files.toString(tmpFile, Charsets.UTF_8)
        .replaceAll(" *[\r\n]+", "");
assertEquals("minidfs-ns.nn1 " + nn1Port + " nn1", fenceCommandOutput);
tmpFile.delete();
// Test failover with forceactive option
assertEquals(0, runTool("-failover", "nn2", "nn1", "--forceactive"));
// Fencing should not occur, since it was graceful
assertFalse(tmpFile.exists());
    // Test failover with no fencer and the forcefence option
conf.unset(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY);
tool.setConf(conf);
assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
assertFalse(tmpFile.exists());
// Test failover with bad fencer and forcefence option
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "foobar!");
tool.setConf(conf);
assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
assertFalse(tmpFile.exists());
// Test failover with force fence listed before the other arguments
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
TestDFSHAAdmin.getFencerTrueCommand());
tool.setConf(conf);
assertEquals(0, runTool("-failover", "--forcefence", "nn1", "nn2"));
}
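  // With the shell fencer configured above, a forced fence of nn1 writes a
  // single line into tmpFile of the form (sketch):
  //   <target_nameserviceid>.<target_namenodeid> <target_port> <dfs_ha_namenode_id>
  // e.g. "minidfs-ns.nn1 <nn1Port> nn1", which is what the assertion checks.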
@Test
public void testCheckHealth() throws Exception {
assertEquals(0, runTool("-checkHealth", "nn1"));
assertEquals(0, runTool("-checkHealth", "nn2"));
}
/**
   * Test case to check that both namenodes cannot be active at the same time.
* @throws Exception
*/
@Test
public void testTransitionToActiveWhenOtherNamenodeisActive()
throws Exception {
NameNode nn1 = cluster.getNameNode(0);
NameNode nn2 = cluster.getNameNode(1);
if(nn1.getState() != null && !nn1.getState().
equals(HAServiceState.STANDBY.name()) ) {
cluster.transitionToStandby(0);
}
if(nn2.getState() != null && !nn2.getState().
equals(HAServiceState.STANDBY.name()) ) {
cluster.transitionToStandby(1);
}
    // Making sure both namenodes are in standby state
assertTrue(nn1.isStandbyState());
assertTrue(nn2.isStandbyState());
    // Triggering the transition to active for both namenodes
runTool("-transitionToActive", "nn1");
runTool("-transitionToActive", "nn2");
assertFalse("Both namenodes cannot be active", nn1.isActiveState()
&& nn2.isActiveState());
    /* In this test case, we have deliberately shut down nn1. This will
       cause HAAdmin#isOtherTargetNodeActive to throw an Exception, and
       transitionToActive for nn2 with the forceActive switch will succeed
       even with that Exception. */
cluster.shutdownNameNode(0);
if(nn2.getState() != null && !nn2.getState().
equals(HAServiceState.STANDBY.name()) ) {
cluster.transitionToStandby(1);
}
    // Making sure the remaining namenode (nn2) is in standby state
assertTrue(nn2.isStandbyState());
assertFalse(cluster.isNameNodeUp(0));
runTool("-transitionToActive", "nn2", "--forceactive");
assertTrue("Namenode nn2 should be active", nn2.isActiveState());
}
private int runTool(String ... args) throws Exception {
errOutBytes.reset();
LOG.info("Running: DFSHAAdmin " + Joiner.on(" ").join(args));
int ret = tool.run(args);
errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8);
LOG.info("Output:\n" + errOutput);
return ret;
}
}
| 9,607 | 35.812261 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetGroups.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import java.io.IOException;
import java.io.PrintStream;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.tools.GetGroupsTestBase;
import org.apache.hadoop.util.Tool;
import org.junit.After;
import org.junit.Before;
/**
* Tests for the HDFS implementation of {@link GetGroups}
*/
public class TestGetGroups extends GetGroupsTestBase {
private MiniDFSCluster cluster;
@Before
public void setUpNameNode() throws IOException {
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
}
@After
public void tearDownNameNode() {
cluster.shutdown();
}
@Override
protected Tool getTool(PrintStream o) {
return new GetGroups(conf, o);
}
}
| 1,638 | 29.351852 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.ReconfigurationUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Scanner;
import static org.hamcrest.CoreMatchers.allOf;
import static org.hamcrest.CoreMatchers.anyOf;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.hamcrest.CoreMatchers.containsString;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestDFSAdmin {
private Configuration conf = null;
private MiniDFSCluster cluster;
private DFSAdmin admin;
private DataNode datanode;
@Before
public void setUp() throws Exception {
conf = new Configuration();
restartCluster();
admin = new DFSAdmin();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
private void restartCluster() throws IOException {
if (cluster != null) {
cluster.shutdown();
}
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
datanode = cluster.getDataNodes().get(0);
}
private List<String> getReconfigureStatus(String nodeType, String address)
throws IOException {
ByteArrayOutputStream bufOut = new ByteArrayOutputStream();
PrintStream out = new PrintStream(bufOut);
ByteArrayOutputStream bufErr = new ByteArrayOutputStream();
PrintStream err = new PrintStream(bufErr);
admin.getReconfigurationStatus(nodeType, address, out, err);
Scanner scanner = new Scanner(bufOut.toString());
List<String> outputs = Lists.newArrayList();
while (scanner.hasNextLine()) {
outputs.add(scanner.nextLine());
}
return outputs;
}
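  // The lines parsed above come from the reconfiguration status report; for a
  // successful run the assertions below expect output of roughly this shape
  // (a sketch, not the exact server text):
  //   ... finished ...
  //   SUCCESS: Changed property dfs.datanode.data.dir
  //       From: "<old data dirs>"
  //       To: "<new data dir>"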
/**
* Test reconfiguration and check the status outputs.
   * @param expectedSuccess set true if the reconfiguration task should succeed.
* @throws IOException
* @throws InterruptedException
*/
  private void testGetReconfigurationStatus(boolean expectedSuccess)
throws IOException, InterruptedException {
ReconfigurationUtil ru = mock(ReconfigurationUtil.class);
datanode.setReconfigurationUtil(ru);
List<ReconfigurationUtil.PropertyChange> changes =
new ArrayList<>();
File newDir = new File(cluster.getDataDirectory(), "data_new");
    if (expectedSuccess) {
newDir.mkdirs();
} else {
// Inject failure.
newDir.createNewFile();
}
changes.add(new ReconfigurationUtil.PropertyChange(
DFS_DATANODE_DATA_DIR_KEY, newDir.toString(),
datanode.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
changes.add(new ReconfigurationUtil.PropertyChange(
"randomKey", "new123", "old456"));
when(ru.parseChangedProperties(any(Configuration.class),
any(Configuration.class))).thenReturn(changes);
final int port = datanode.getIpcPort();
final String address = "localhost:" + port;
assertThat(admin.startReconfiguration("datanode", address), is(0));
List<String> outputs = null;
int count = 100;
while (count > 0) {
outputs = getReconfigureStatus("datanode", address);
if (!outputs.isEmpty() && outputs.get(0).contains("finished")) {
break;
}
count--;
Thread.sleep(100);
}
assertTrue(count > 0);
    if (expectedSuccess) {
assertThat(outputs.size(), is(4));
} else {
assertThat(outputs.size(), is(6));
}
List<StorageLocation> locations = DataNode.getStorageLocations(
datanode.getConf());
    if (expectedSuccess) {
assertThat(locations.size(), is(1));
assertThat(locations.get(0).getFile(), is(newDir));
// Verify the directory is appropriately formatted.
assertTrue(new File(newDir, Storage.STORAGE_DIR_CURRENT).isDirectory());
} else {
assertTrue(locations.isEmpty());
}
int offset = 1;
    if (expectedSuccess) {
assertThat(outputs.get(offset),
containsString("SUCCESS: Changed property " +
DFS_DATANODE_DATA_DIR_KEY));
} else {
assertThat(outputs.get(offset),
containsString("FAILED: Change property " +
DFS_DATANODE_DATA_DIR_KEY));
}
assertThat(outputs.get(offset + 1),
is(allOf(containsString("From:"), containsString("data1"),
containsString("data2"))));
assertThat(outputs.get(offset + 2),
is(not(anyOf(containsString("data1"), containsString("data2")))));
assertThat(outputs.get(offset + 2),
is(allOf(containsString("To"), containsString("data_new"))));
}
@Test(timeout = 30000)
public void testGetReconfigurationStatus()
throws IOException, InterruptedException {
testGetReconfigurationStatus(true);
restartCluster();
testGetReconfigurationStatus(false);
}
private List<String> getReconfigurationAllowedProperties(
String nodeType, String address)
throws IOException {
ByteArrayOutputStream bufOut = new ByteArrayOutputStream();
PrintStream out = new PrintStream(bufOut);
ByteArrayOutputStream bufErr = new ByteArrayOutputStream();
PrintStream err = new PrintStream(bufErr);
admin.getReconfigurableProperties(nodeType, address, out, err);
Scanner scanner = new Scanner(bufOut.toString());
List<String> outputs = Lists.newArrayList();
while (scanner.hasNextLine()) {
outputs.add(scanner.nextLine());
}
return outputs;
}
@Test(timeout = 30000)
public void testGetReconfigAllowedProperties() throws IOException {
final int port = datanode.getIpcPort();
final String address = "localhost:" + port;
List<String> outputs =
getReconfigurationAllowedProperties("datanode", address);
assertEquals(2, outputs.size());
assertEquals(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
outputs.get(1));
}
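  // Equivalent CLI sketch (assuming the dfsadmin -reconfig subcommand):
  //   hdfs dfsadmin -reconfig datanode <host:port> properties
  // which should list dfs.datanode.data.dir as the reconfigurable property.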
}
| 7,459 | 33.859813 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.GetConf.Command;
import org.apache.hadoop.hdfs.tools.GetConf.CommandHandler;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Test;
import com.google.common.base.Joiner;
/**
* Test for {@link GetConf}
*/
public class TestGetConf {
enum TestType {
NAMENODE, BACKUP, SECONDARY, NNRPCADDRESSES
}
FileSystem localFileSys;
/** Setup federation nameServiceIds in the configuration */
private void setupNameServices(HdfsConfiguration conf, int nameServiceIdCount) {
StringBuilder nsList = new StringBuilder();
for (int i = 0; i < nameServiceIdCount; i++) {
if (nsList.length() > 0) {
nsList.append(",");
}
nsList.append(getNameServiceId(i));
}
conf.set(DFS_NAMESERVICES, nsList.toString());
}
/** Set a given key with value as address, for all the nameServiceIds.
* @param conf configuration to set the addresses in
* @param key configuration key
* @param nameServiceIdCount Number of nameServices for which the key is set
* @param portOffset starting port offset
* @return list of addresses that are set in the configuration
*/
private String[] setupAddress(HdfsConfiguration conf, String key,
int nameServiceIdCount, int portOffset) {
String[] values = new String[nameServiceIdCount];
for (int i = 0; i < nameServiceIdCount; i++, portOffset++) {
String nsID = getNameServiceId(i);
String specificKey = DFSUtil.addKeySuffixes(key, nsID);
values[i] = "nn" + i + ":" + portOffset;
conf.set(specificKey, values[i]);
}
return values;
}
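  // Example of what setupAddress produces for key = dfs.namenode.rpc-address,
  // nameServiceIdCount = 2 and portOffset = 1000 (a sketch; the literal key
  // text depends on the DFSConfigKeys constant passed in):
  //   dfs.namenode.rpc-address.ns0 = nn0:1000
  //   dfs.namenode.rpc-address.ns1 = nn1:1001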
/**
* Add namenodes to the static resolution list to avoid going
* through DNS which can be really slow in some configurations.
*/
private void setupStaticHostResolution(int nameServiceIdCount) {
for (int i = 0; i < nameServiceIdCount; i++) {
NetUtils.addStaticResolution("nn" + i, "localhost");
}
}
/*
* Convert the map returned from DFSUtil functions to an array of
* addresses represented as "host:port"
*/
private String[] toStringArray(List<ConfiguredNNAddress> list) {
String[] ret = new String[list.size()];
for (int i = 0; i < list.size(); i++) {
ret[i] = NetUtils.getHostPortString(list.get(i).getAddress());
}
return ret;
}
/**
   * Using DFSUtil methods, get the list of the given {@code type} of addresses
*/
private Map<String, Map<String, InetSocketAddress>> getAddressListFromConf(
TestType type, HdfsConfiguration conf) throws IOException {
switch (type) {
case NAMENODE:
return DFSUtil.getNNServiceRpcAddressesForCluster(conf);
case BACKUP:
return DFSUtil.getBackupNodeAddresses(conf);
case SECONDARY:
return DFSUtil.getSecondaryNameNodeAddresses(conf);
case NNRPCADDRESSES:
return DFSUtil.getNNServiceRpcAddressesForCluster(conf);
}
return null;
}
private String runTool(HdfsConfiguration conf, String[] args, boolean success)
throws Exception {
ByteArrayOutputStream o = new ByteArrayOutputStream();
PrintStream out = new PrintStream(o, true);
try {
int ret = ToolRunner.run(new GetConf(conf, out, out), args);
out.flush();
System.err.println("Output: " + o.toString());
assertEquals("Expected " + (success?"success":"failure") +
" for args: " + Joiner.on(" ").join(args) + "\n" +
"Output: " + o.toString(),
success, ret == 0);
return o.toString();
} finally {
o.close();
out.close();
}
}
/**
* Get address list for a given type of address. Command expected to
* fail if {@code success} is false.
* @return returns the success or error output from the tool.
*/
private String getAddressListFromTool(TestType type, HdfsConfiguration conf,
boolean success)
throws Exception {
String[] args = new String[1];
switch (type) {
case NAMENODE:
args[0] = Command.NAMENODE.getName();
break;
case BACKUP:
args[0] = Command.BACKUP.getName();
break;
case SECONDARY:
args[0] = Command.SECONDARY.getName();
break;
case NNRPCADDRESSES:
args[0] = Command.NNRPCADDRESSES.getName();
break;
}
return runTool(conf, args, success);
}
/**
   * Using {@link GetConf} methods, get the list of the given {@code type} of
   * addresses.
   *
   * @param type TestType
   * @param conf configuration
   * @param checkPort if true, verify NNRPCADDRESSES whose expected value is
   *          hostname:rpc-port; if false, the expected value is the hostname
   *          only
   * @param expected expected addresses
*/
private void getAddressListFromTool(TestType type, HdfsConfiguration conf,
boolean checkPort, List<ConfiguredNNAddress> expected) throws Exception {
String out = getAddressListFromTool(type, conf, expected.size() != 0);
List<String> values = new ArrayList<String>();
// Convert list of addresses returned to an array of string
StringTokenizer tokenizer = new StringTokenizer(out);
while (tokenizer.hasMoreTokens()) {
String s = tokenizer.nextToken().trim();
values.add(s);
}
String[] actual = values.toArray(new String[values.size()]);
// Convert expected list to String[] of hosts
int i = 0;
String[] expectedHosts = new String[expected.size()];
for (ConfiguredNNAddress cnn : expected) {
InetSocketAddress addr = cnn.getAddress();
if (!checkPort) {
expectedHosts[i++] = addr.getHostName();
      } else {
        expectedHosts[i++] = addr.getHostName() + ":" + addr.getPort();
}
}
// Compare two arrays
assertTrue(Arrays.equals(expectedHosts, actual));
}
private void verifyAddresses(HdfsConfiguration conf, TestType type,
boolean checkPort, String... expected) throws Exception {
// Ensure DFSUtil returned the right set of addresses
Map<String, Map<String, InetSocketAddress>> map =
getAddressListFromConf(type, conf);
List<ConfiguredNNAddress> list = DFSUtil.flattenAddressMap(map);
String[] actual = toStringArray(list);
Arrays.sort(actual);
Arrays.sort(expected);
assertArrayEquals(expected, actual);
// Test GetConf returned addresses
getAddressListFromTool(type, conf, checkPort, list);
}
private static String getNameServiceId(int index) {
return "ns" + index;
}
/**
* Test empty configuration
*/
@Test(timeout=10000)
public void testEmptyConf() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration(false);
// Verify getting addresses fails
getAddressListFromTool(TestType.NAMENODE, conf, false);
System.out.println(getAddressListFromTool(TestType.BACKUP, conf, false));
getAddressListFromTool(TestType.SECONDARY, conf, false);
getAddressListFromTool(TestType.NNRPCADDRESSES, conf, false);
for (Command cmd : Command.values()) {
String arg = cmd.getName();
CommandHandler handler = Command.getHandler(arg);
assertNotNull("missing handler: " + cmd, handler);
if (handler.key != null) {
// First test with configuration missing the required key
String[] args = {handler.key};
runTool(conf, args, false);
}
}
}
/**
* Test invalid argument to the tool
*/
@Test(timeout=10000)
public void testInvalidArgument() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
String[] args = {"-invalidArgument"};
String ret = runTool(conf, args, false);
assertTrue(ret.contains(GetConf.USAGE));
}
/**
* Tests to make sure the returned addresses are correct in case of default
* configuration with no federation
*/
@Test(timeout=10000)
public void testNonFederation() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration(false);
// Returned namenode address should match default address
conf.set(FS_DEFAULT_NAME_KEY, "hdfs://localhost:1000");
verifyAddresses(conf, TestType.NAMENODE, false, "localhost:1000");
verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:1000");
// Returned address should match backupnode RPC address
conf.set(DFS_NAMENODE_BACKUP_ADDRESS_KEY,"localhost:1001");
verifyAddresses(conf, TestType.BACKUP, false, "localhost:1001");
// Returned address should match secondary http address
conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "localhost:1002");
verifyAddresses(conf, TestType.SECONDARY, false, "localhost:1002");
// Returned namenode address should match service RPC address
conf = new HdfsConfiguration();
conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "localhost:1000");
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001");
verifyAddresses(conf, TestType.NAMENODE, false, "localhost:1000");
verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:1000");
// Returned address should match RPC address
conf = new HdfsConfiguration();
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001");
verifyAddresses(conf, TestType.NAMENODE, false, "localhost:1001");
verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:1001");
}
/**
   * Tests to make sure the returned addresses are correct in the case of a
   * federation setup.
*/
@Test(timeout=10000)
public void testFederation() throws Exception {
final int nsCount = 10;
HdfsConfiguration conf = new HdfsConfiguration(false);
// Test to ensure namenode, backup and secondary namenode addresses are
// returned from federation configuration. Returned namenode addresses are
// based on service RPC address and not regular RPC address
setupNameServices(conf, nsCount);
String[] nnAddresses = setupAddress(conf,
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsCount, 1000);
setupAddress(conf, DFS_NAMENODE_RPC_ADDRESS_KEY, nsCount, 1500);
setupStaticHostResolution(nsCount);
String[] backupAddresses = setupAddress(conf,
DFS_NAMENODE_BACKUP_ADDRESS_KEY, nsCount, 2000);
String[] secondaryAddresses = setupAddress(conf,
DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, nsCount, 3000);
verifyAddresses(conf, TestType.NAMENODE, false, nnAddresses);
verifyAddresses(conf, TestType.BACKUP, false, backupAddresses);
verifyAddresses(conf, TestType.SECONDARY, false, secondaryAddresses);
verifyAddresses(conf, TestType.NNRPCADDRESSES, true, nnAddresses);
// Test to ensure namenode, backup, secondary namenode addresses and
// namenode rpc addresses are returned from federation configuration.
// Returned namenode addresses are based on regular RPC address
// in the absence of service RPC address.
conf = new HdfsConfiguration(false);
setupNameServices(conf, nsCount);
nnAddresses = setupAddress(conf,
DFS_NAMENODE_RPC_ADDRESS_KEY, nsCount, 1000);
backupAddresses = setupAddress(conf,
DFS_NAMENODE_BACKUP_ADDRESS_KEY, nsCount, 2000);
secondaryAddresses = setupAddress(conf,
DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, nsCount, 3000);
verifyAddresses(conf, TestType.NAMENODE, false, nnAddresses);
verifyAddresses(conf, TestType.BACKUP, false, backupAddresses);
verifyAddresses(conf, TestType.SECONDARY, false, secondaryAddresses);
verifyAddresses(conf, TestType.NNRPCADDRESSES, true, nnAddresses);
}
@Test(timeout=10000)
public void testGetSpecificKey() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
conf.set("mykey", " myval ");
String[] args = {"-confKey", "mykey"};
String toolResult = runTool(conf, args, true);
assertEquals(String.format("myval%n"), toolResult);
}
@Test(timeout=10000)
public void testExtraArgsThrowsError() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
conf.set("mykey", "myval");
String[] args = {"-namenodes", "unexpected-arg"};
assertTrue(runTool(conf, args, false).contains(
"Did not expect argument: unexpected-arg"));
}
/**
* Tests commands other than {@link Command#NAMENODE}, {@link Command#BACKUP},
* {@link Command#SECONDARY} and {@link Command#NNRPCADDRESSES}
*/
@Test(timeout=10000)
public void testTool() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration(false);
for (Command cmd : Command.values()) {
CommandHandler handler = Command.getHandler(cmd.getName());
if (handler.key != null && !"-confKey".equals(cmd.getName())) {
// Add the key to the conf and ensure tool returns the right value
String[] args = {cmd.getName()};
conf.set(handler.key, "value");
assertTrue(runTool(conf, args, true).contains("value"));
}
}
}
@Test
public void TestGetConfExcludeCommand() throws Exception{
HdfsConfiguration conf = new HdfsConfiguration();
// Set up the hosts/exclude files.
localFileSys = FileSystem.getLocal(conf);
Path workingDir = localFileSys.getWorkingDirectory();
Path dir = new Path(workingDir, System.getProperty("test.build.data", "target/test/data") + "/Getconf/");
Path hostsFile = new Path(dir, "hosts");
Path excludeFile = new Path(dir, "exclude");
// Setup conf
conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
writeConfigFile(hostsFile, null);
writeConfigFile(excludeFile, null);
String[] args = {"-excludeFile"};
String ret = runTool(conf, args, true);
assertEquals(excludeFile.toUri().getPath(),ret.trim());
cleanupFile(localFileSys, excludeFile.getParent());
}
@Test
public void TestGetConfIncludeCommand() throws Exception{
HdfsConfiguration conf = new HdfsConfiguration();
// Set up the hosts/exclude files.
localFileSys = FileSystem.getLocal(conf);
Path workingDir = localFileSys.getWorkingDirectory();
Path dir = new Path(workingDir, System.getProperty("test.build.data", "target/test/data") + "/Getconf/");
Path hostsFile = new Path(dir, "hosts");
Path excludeFile = new Path(dir, "exclude");
// Setup conf
conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
writeConfigFile(hostsFile, null);
writeConfigFile(excludeFile, null);
String[] args = {"-includeFile"};
String ret = runTool(conf, args, true);
assertEquals(hostsFile.toUri().getPath(),ret.trim());
cleanupFile(localFileSys, excludeFile.getParent());
}
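  // Both include/exclude tests above mirror command-line usage of the form
  // (sketch):
  //   hdfs getconf -includeFile
  //   hdfs getconf -excludeFile
  // which print the paths configured for the hosts include and exclude files.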
@Test
public void testIncludeInternalNameServices() throws Exception {
final int nsCount = 10;
final int remoteNsCount = 4;
HdfsConfiguration conf = new HdfsConfiguration();
setupNameServices(conf, nsCount);
setupAddress(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsCount, 1000);
setupAddress(conf, DFS_NAMENODE_RPC_ADDRESS_KEY, nsCount, 1500);
conf.set(DFS_INTERNAL_NAMESERVICES_KEY, "ns1");
setupStaticHostResolution(nsCount);
String[] includedNN = new String[] {"nn1:1001"};
verifyAddresses(conf, TestType.NAMENODE, false, includedNN);
verifyAddresses(conf, TestType.NNRPCADDRESSES, true, includedNN);
}
private void writeConfigFile(Path name, ArrayList<String> nodes)
throws IOException {
// delete if it already exists
if (localFileSys.exists(name)) {
localFileSys.delete(name, true);
}
FSDataOutputStream stm = localFileSys.create(name);
if (nodes != null) {
for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
String node = it.next();
stm.writeBytes(node);
stm.writeBytes("\n");
}
}
stm.close();
}
private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
assertTrue(fileSys.exists(name));
fileSys.delete(name, true);
assertTrue(!fileSys.exists(name));
}
}
| 18,246 | 37.658898 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ha.ClientBaseWithFixes;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.HealthMonitor;
import org.apache.hadoop.ha.TestNodeFencer.AlwaysSucceedFencer;
import org.apache.hadoop.ha.ZKFCTestUtil;
import org.apache.hadoop.ha.ZKFailoverController;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Supplier;
import org.mockito.Mockito;
public class TestDFSZKFailoverController extends ClientBaseWithFixes {
private Configuration conf;
private MiniDFSCluster cluster;
private TestContext ctx;
private ZKFCThread thr1, thr2;
private FileSystem fs;
static {
// Make tests run faster by avoiding fsync()
EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
}
@Before
public void setup() throws Exception {
conf = new Configuration();
// Specify the quorum per-nameservice, to ensure that these configs
// can be nameservice-scoped.
conf.set(ZKFailoverController.ZK_QUORUM_KEY + ".ns1", hostPort);
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
AlwaysSucceedFencer.class.getName());
conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
// Turn off IPC client caching, so that the suite can handle
// the restart of the daemons between test cases.
conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
0);
conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1", 10023);
conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", 10024);
MiniDFSNNTopology topology = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
.addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10021))
.addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10022)));
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(topology)
.numDataNodes(0)
.build();
cluster.waitActive();
ctx = new TestContext();
ctx.addThread(thr1 = new ZKFCThread(ctx, 0));
assertEquals(0, thr1.zkfc.run(new String[]{"-formatZK"}));
thr1.start();
waitForHAState(0, HAServiceState.ACTIVE);
ctx.addThread(thr2 = new ZKFCThread(ctx, 1));
thr2.start();
// Wait for the ZKFCs to fully start up
ZKFCTestUtil.waitForHealthState(thr1.zkfc,
HealthMonitor.State.SERVICE_HEALTHY, ctx);
ZKFCTestUtil.waitForHealthState(thr2.zkfc,
HealthMonitor.State.SERVICE_HEALTHY, ctx);
fs = HATestUtil.configureFailoverFs(cluster, conf);
}
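  // Note on the nameservice-scoped quorum set in setup(): the ".ns1" suffix is
  // appended to ZKFailoverController.ZK_QUORUM_KEY, so the effective property
  // is "<zk-quorum-key>.ns1" (with the default key name this would be
  // ha.zookeeper.quorum.ns1, stated here as an assumption, not asserted by
  // this test).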
@After
public void shutdown() throws Exception {
cluster.shutdown();
if (thr1 != null) {
thr1.interrupt();
}
if (thr2 != null) {
thr2.interrupt();
}
if (ctx != null) {
ctx.stop();
}
}
/**
* Test that thread dump is captured after NN state changes.
*/
@Test(timeout=60000)
public void testThreadDumpCaptureAfterNNStateChange() throws Exception {
NameNodeResourceChecker mockResourceChecker = Mockito.mock(
NameNodeResourceChecker.class);
Mockito.doReturn(false).when(mockResourceChecker).hasAvailableDiskSpace();
cluster.getNameNode(0).getNamesystem()
.setNNResourceChecker(mockResourceChecker);
waitForHAState(0, HAServiceState.STANDBY);
while (!thr1.zkfc.isThreadDumpCaptured()) {
Thread.sleep(1000);
}
}
/**
* Test that automatic failover is triggered by shutting the
* active NN down.
*/
@Test(timeout=60000)
public void testFailoverAndBackOnNNShutdown() throws Exception {
Path p1 = new Path("/dir1");
Path p2 = new Path("/dir2");
// Write some data on the first NN
fs.mkdirs(p1);
// Shut it down, causing automatic failover
cluster.shutdownNameNode(0);
// Data should still exist. Write some on the new NN
assertTrue(fs.exists(p1));
fs.mkdirs(p2);
assertEquals(AlwaysSucceedFencer.getLastFencedService().getAddress(),
thr1.zkfc.getLocalTarget().getAddress());
// Start the first node back up
cluster.restartNameNode(0);
// This should have no effect -- the new node should be STANDBY.
waitForHAState(0, HAServiceState.STANDBY);
assertTrue(fs.exists(p1));
assertTrue(fs.exists(p2));
// Shut down the second node, which should failback to the first
cluster.shutdownNameNode(1);
waitForHAState(0, HAServiceState.ACTIVE);
// First node should see what was written on the second node while it was down.
assertTrue(fs.exists(p1));
assertTrue(fs.exists(p2));
assertEquals(AlwaysSucceedFencer.getLastFencedService().getAddress(),
thr2.zkfc.getLocalTarget().getAddress());
}
@Test(timeout=30000)
public void testManualFailover() throws Exception {
thr2.zkfc.getLocalTarget().getZKFCProxy(conf, 15000).gracefulFailover();
waitForHAState(0, HAServiceState.STANDBY);
waitForHAState(1, HAServiceState.ACTIVE);
thr1.zkfc.getLocalTarget().getZKFCProxy(conf, 15000).gracefulFailover();
waitForHAState(0, HAServiceState.ACTIVE);
waitForHAState(1, HAServiceState.STANDBY);
}
@Test(timeout=30000)
public void testManualFailoverWithDFSHAAdmin() throws Exception {
DFSHAAdmin tool = new DFSHAAdmin();
tool.setConf(conf);
assertEquals(0,
tool.run(new String[]{"-failover", "nn1", "nn2"}));
waitForHAState(0, HAServiceState.STANDBY);
waitForHAState(1, HAServiceState.ACTIVE);
assertEquals(0,
tool.run(new String[]{"-failover", "nn2", "nn1"}));
waitForHAState(0, HAServiceState.ACTIVE);
waitForHAState(1, HAServiceState.STANDBY);
}
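  // The same failovers can be driven from a shell (sketch):
  //   hdfs haadmin -failover nn1 nn2
  //   hdfs haadmin -failover nn2 nn1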
private void waitForHAState(int nnidx, final HAServiceState state)
throws TimeoutException, InterruptedException {
final NameNode nn = cluster.getNameNode(nnidx);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
return nn.getRpcServer().getServiceStatus().getState() == state;
} catch (Exception e) {
e.printStackTrace();
return false;
}
}
}, 50, 15000);
}
/**
* Test-thread which runs a ZK Failover Controller corresponding
* to a given NameNode in the minicluster.
*/
private class ZKFCThread extends TestingThread {
private final DFSZKFailoverController zkfc;
public ZKFCThread(TestContext ctx, int idx) {
super(ctx);
this.zkfc = DFSZKFailoverController.create(
cluster.getConfiguration(idx));
}
@Override
public void doWork() throws Exception {
try {
assertEquals(0, zkfc.run(new String[0]));
} catch (InterruptedException ie) {
// Interrupted by main thread, that's OK.
}
}
}
}
| 8,522 | 33.930328 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import com.google.common.base.Charsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
import org.junit.After;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
public class TestDFSAdminWithHA {
private final ByteArrayOutputStream out = new ByteArrayOutputStream();
private final ByteArrayOutputStream err = new ByteArrayOutputStream();
private MiniQJMHACluster cluster;
private Configuration conf;
private DFSAdmin admin;
private PrintStream originOut;
private PrintStream originErr;
private static final String NSID = "ns1";
private static String newLine = System.getProperty("line.separator");
private void assertOutputMatches(String string) {
    String errOutput = new String(err.toByteArray(), Charsets.UTF_8);
String output = new String(out.toByteArray(), Charsets.UTF_8);
if (!errOutput.matches(string) && !output.matches(string)) {
fail("Expected output to match '" + string +
"' but err_output was:\n" + errOutput +
"\n and output was: \n" + output);
}
out.reset();
err.reset();
}
private void setHAConf(Configuration conf, String nn1Addr, String nn2Addr) {
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
"hdfs://" + NSID);
conf.set(DFSConfigKeys.DFS_NAMESERVICES, NSID);
conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, NSID);
conf.set(DFSUtil.addKeySuffixes(
DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, NSID), "nn1,nn2");
conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
conf.set(DFSUtil.addKeySuffixes(
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, NSID, "nn1"), nn1Addr);
conf.set(DFSUtil.addKeySuffixes(
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, NSID, "nn2"), nn2Addr);
}
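  // The resulting client-side HA view is roughly (a sketch assuming the
  // default property names behind the DFSConfigKeys constants):
  //   fs.defaultFS                     = hdfs://ns1
  //   dfs.nameservices                 = ns1
  //   dfs.ha.namenodes.ns1             = nn1,nn2
  //   dfs.namenode.rpc-address.ns1.nn1 = <nn1Addr>
  //   dfs.namenode.rpc-address.ns1.nn2 = <nn2Addr>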
private void setUpHaCluster(boolean security) throws Exception {
conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
security);
cluster = new MiniQJMHACluster.Builder(conf).build();
setHAConf(conf, cluster.getDfsCluster().getNameNode(0).getHostAndPort(),
cluster.getDfsCluster().getNameNode(1).getHostAndPort());
cluster.getDfsCluster().getNameNode(0).getHostAndPort();
admin = new DFSAdmin();
admin.setConf(conf);
assertTrue(HAUtil.isHAEnabled(conf, "ns1"));
originOut = System.out;
originErr = System.err;
System.setOut(new PrintStream(out));
System.setErr(new PrintStream(err));
}
@After
public void tearDown() throws Exception {
System.out.flush();
System.err.flush();
System.setOut(originOut);
System.setErr(originErr);
if (admin != null) {
admin.close();
}
if (cluster != null) {
cluster.shutdown();
}
out.reset();
err.reset();
}
@Test(timeout = 30000)
public void testSetSafeMode() throws Exception {
setUpHaCluster(false);
// Enter safemode
int exitCode = admin.run(new String[] {"-safemode", "enter"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Safe mode is ON in.*";
assertOutputMatches(message + newLine + message + newLine);
// Get safemode
exitCode = admin.run(new String[] {"-safemode", "get"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is ON in.*";
assertOutputMatches(message + newLine + message + newLine);
// Leave safemode
exitCode = admin.run(new String[] {"-safemode", "leave"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is OFF in.*";
assertOutputMatches(message + newLine + message + newLine);
// Get safemode
exitCode = admin.run(new String[] {"-safemode", "get"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is OFF in.*";
assertOutputMatches(message + newLine + message + newLine);
}
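  // In an HA pair each dfsadmin command reports once per namenode, which is
  // why every check above matches "message + newLine + message + newLine".
  // CLI sketch: hdfs dfsadmin -safemode enter|get|leave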
@Test (timeout = 30000)
public void testSaveNamespace() throws Exception {
setUpHaCluster(false);
// Safe mode should be turned ON in order to create namespace image.
int exitCode = admin.run(new String[] {"-safemode", "enter"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Safe mode is ON in.*";
assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-saveNamespace"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Save namespace successful for.*";
assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
public void testRestoreFailedStorage() throws Exception {
setUpHaCluster(false);
int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "restoreFailedStorage is set to false for.*";
// Default is false
assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "restoreFailedStorage is set to true for.*";
assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "restoreFailedStorage is set to false for.*";
assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
public void testRefreshNodes() throws Exception {
setUpHaCluster(false);
int exitCode = admin.run(new String[] {"-refreshNodes"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh nodes successful for.*";
assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
public void testSetBalancerBandwidth() throws Exception {
setUpHaCluster(false);
int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Balancer bandwidth is set to 10 for.*";
assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
public void testMetaSave() throws Exception {
setUpHaCluster(false);
int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Created metasave file dfs.meta in the log directory"
+ " of namenode.*";
assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
public void testRefreshServiceAcl() throws Exception {
setUpHaCluster(true);
int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh service acl successful for.*";
assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
public void testRefreshUserToGroupsMappings() throws Exception {
setUpHaCluster(false);
int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh user to groups mapping successful for.*";
assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
public void testRefreshSuperUserGroupsConfiguration() throws Exception {
setUpHaCluster(false);
int exitCode = admin.run(
new String[] {"-refreshSuperUserGroupsConfiguration"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh super user groups configuration successful for.*";
assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
public void testRefreshCallQueue() throws Exception {
setUpHaCluster(false);
int exitCode = admin.run(new String[] {"-refreshCallQueue"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh call queue successful for.*";
assertOutputMatches(message + newLine + message + newLine);
}
}
| 9,347 | 37.628099 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintStream;
import java.io.RandomAccessFile;
import java.io.StringReader;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.HashMap;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.net.NetUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
import static org.apache.hadoop.fs.permission.AclEntryType.USER;
import static org.apache.hadoop.fs.permission.FsAction.ALL;
import static org.apache.hadoop.fs.permission.FsAction.READ;
import static org.apache.hadoop.fs.permission.FsAction.READ_WRITE;
import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
import static org.apache.hadoop.fs.permission.FsAction.NONE;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
import static org.junit.Assert.assertEquals;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
/**
* Tests OfflineImageViewer if the input fsimage has HDFS ACLs
*/
public class TestOfflineImageViewerForAcl {
private static final Log LOG =
LogFactory.getLog(TestOfflineImageViewerForAcl.class);
private static File originalFsimage = null;
// ACLs as set to dfs, to be compared with viewer's output
final static HashMap<String, AclStatus> writtenAcls = Maps.newHashMap();
/**
* Create a populated namespace for later testing. Save its contents to a
* data structure and store its fsimage location.
* We only want to generate the fsimage file once and use it for
* multiple tests.
*/
@BeforeClass
public static void createOriginalFSImage() throws IOException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
DistributedFileSystem hdfs = cluster.getFileSystem();
// Create a reasonable namespace with ACLs
Path dir = new Path("/dirWithNoAcl");
hdfs.mkdirs(dir);
writtenAcls.put(dir.toString(), hdfs.getAclStatus(dir));
dir = new Path("/dirWithDefaultAcl");
hdfs.mkdirs(dir);
hdfs.setAcl(dir, Lists.newArrayList(
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "foo", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE)));
writtenAcls.put(dir.toString(), hdfs.getAclStatus(dir));
Path file = new Path("/noAcl");
FSDataOutputStream o = hdfs.create(file);
o.write(23);
o.close();
writtenAcls.put(file.toString(), hdfs.getAclStatus(file));
file = new Path("/withAcl");
o = hdfs.create(file);
o.write(23);
o.close();
hdfs.setAcl(file, Lists.newArrayList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE)));
writtenAcls.put(file.toString(), hdfs.getAclStatus(file));
file = new Path("/withSeveralAcls");
o = hdfs.create(file);
o.write(23);
o.close();
hdfs.setAcl(file, Lists.newArrayList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "foo", READ_WRITE),
aclEntry(ACCESS, USER, "bar", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, GROUP, "group", READ),
aclEntry(ACCESS, OTHER, NONE)));
writtenAcls.put(file.toString(), hdfs.getAclStatus(file));
// Write results to the fsimage file
hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
hdfs.saveNamespace();
// Determine the location of the fsimage file
originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
if (originalFsimage == null) {
throw new RuntimeException("Didn't generate or can't find fsimage");
}
LOG.debug("original FS image file is " + originalFsimage);
} finally {
if (cluster != null)
cluster.shutdown();
}
}
@AfterClass
public static void deleteOriginalFSImage() throws IOException {
if (originalFsimage != null && originalFsimage.exists()) {
originalFsimage.delete();
}
}
@Test
public void testWebImageViewerForAcl() throws Exception {
WebImageViewer viewer = new WebImageViewer(
NetUtils.createSocketAddr("localhost:0"));
try {
viewer.initServer(originalFsimage.getAbsolutePath());
int port = viewer.getPort();
// create a WebHdfsFileSystem instance
URI uri = new URI("webhdfs://localhost:" + String.valueOf(port));
Configuration conf = new Configuration();
WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)FileSystem.get(uri, conf);
// GETACLSTATUS operation to a directory without ACL
AclStatus acl = webhdfs.getAclStatus(new Path("/dirWithNoAcl"));
assertEquals(writtenAcls.get("/dirWithNoAcl"), acl);
// GETACLSTATUS operation to a directory with a default ACL
acl = webhdfs.getAclStatus(new Path("/dirWithDefaultAcl"));
assertEquals(writtenAcls.get("/dirWithDefaultAcl"), acl);
// GETACLSTATUS operation to a file without ACL
acl = webhdfs.getAclStatus(new Path("/noAcl"));
assertEquals(writtenAcls.get("/noAcl"), acl);
      // GETACLSTATUS operation to a file with an ACL
acl = webhdfs.getAclStatus(new Path("/withAcl"));
assertEquals(writtenAcls.get("/withAcl"), acl);
// GETACLSTATUS operation to a file with several ACL entries
acl = webhdfs.getAclStatus(new Path("/withSeveralAcls"));
assertEquals(writtenAcls.get("/withSeveralAcls"), acl);
      // GETACLSTATUS operation to an invalid path
URL url = new URL("http://localhost:" + port +
"/webhdfs/v1/invalid/?op=GETACLSTATUS");
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.connect();
assertEquals(HttpURLConnection.HTTP_NOT_FOUND,
connection.getResponseCode());
} finally {
// shutdown the viewer
viewer.close();
}
}
@Test
public void testPBImageXmlWriterForAcl() throws Exception{
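    // Dump the fsimage (including its ACL entries) to XML and verify the
    // output is well-formed by running it through a SAX parser.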
ByteArrayOutputStream output = new ByteArrayOutputStream();
PrintStream o = new PrintStream(output);
PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), o);
v.visit(new RandomAccessFile(originalFsimage, "r"));
SAXParserFactory spf = SAXParserFactory.newInstance();
SAXParser parser = spf.newSAXParser();
final String xml = output.toString();
parser.parse(new InputSource(new StringReader(xml)), new DefaultHandler());
}
}
| 8,929 | 37.826087 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForXAttr.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.net.NetUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
 * Tests OfflineImageViewer when the input fsimage has XAttributes.
*/
public class TestOfflineImageViewerForXAttr {
private static final Log LOG = LogFactory
.getLog(TestOfflineImageViewerForXAttr.class);
private static File originalFsimage = null;
static String attr1JSon;
/**
* Create a populated namespace for later testing. Save its contents to a data
* structure and store its fsimage location. We only want to generate the
* fsimage file once and use it for multiple tests.
*/
@BeforeClass
public static void createOriginalFSImage() throws IOException {
MiniDFSCluster cluster = null;
Configuration conf = new Configuration();
try {
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
DistributedFileSystem hdfs = cluster.getFileSystem();
// Create a name space with XAttributes
Path dir = new Path("/dir1");
hdfs.mkdirs(dir);
hdfs.setXAttr(dir, "user.attr1", "value1".getBytes());
hdfs.setXAttr(dir, "user.attr2", "value2".getBytes());
// Write results to the fsimage file
hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
hdfs.saveNamespace();
List<XAttr> attributes = new ArrayList<XAttr>();
attributes.add(XAttrHelper.buildXAttr("user.attr1", "value1".getBytes()));
attr1JSon = JsonUtil.toJsonString(attributes, null);
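      // attr1JSon is captured before user.attr2 is added below, so it
      // describes only user.attr1.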
attributes.add(XAttrHelper.buildXAttr("user.attr2", "value2".getBytes()));
// Determine the location of the fsimage file
originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
if (originalFsimage == null) {
throw new RuntimeException("Didn't generate or can't find fsimage");
}
LOG.debug("original FS image file is " + originalFsimage);
} finally {
if (cluster != null)
cluster.shutdown();
}
}
@AfterClass
public static void deleteOriginalFSImage() throws IOException {
if (originalFsimage != null && originalFsimage.exists()) {
originalFsimage.delete();
}
}
@Test
public void testWebImageViewerForListXAttrs() throws Exception {
try (WebImageViewer viewer = new WebImageViewer(
NetUtils.createSocketAddr("localhost:0"))) {
viewer.initServer(originalFsimage.getAbsolutePath());
int port = viewer.getPort();
URL url = new URL("http://localhost:" + port
+ "/webhdfs/v1/dir1/?op=LISTXATTRS");
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.connect();
assertEquals(HttpURLConnection.HTTP_OK, connection.getResponseCode());
String content = IOUtils.toString(connection.getInputStream());
assertTrue("Missing user.attr1 in response ",
content.contains("user.attr1"));
assertTrue("Missing user.attr2 in response ",
content.contains("user.attr2"));
}
}
@Test
public void testWebImageViewerForGetXAttrsWithOutParameters()
throws Exception {
try (WebImageViewer viewer = new WebImageViewer(
NetUtils.createSocketAddr("localhost:0"))) {
viewer.initServer(originalFsimage.getAbsolutePath());
int port = viewer.getPort();
URL url = new URL("http://localhost:" + port
+ "/webhdfs/v1/dir1/?op=GETXATTRS");
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.connect();
assertEquals(HttpURLConnection.HTTP_OK, connection.getResponseCode());
String content = IOUtils.toString(connection.getInputStream());
assertTrue("Missing user.attr1 in response ",
content.contains("user.attr1"));
assertTrue("Missing user.attr2 in response ",
content.contains("user.attr2"));
}
}
@Test
public void testWebImageViewerForGetXAttrsWithParameters() throws Exception {
try (WebImageViewer viewer = new WebImageViewer(
NetUtils.createSocketAddr("localhost:0"))) {
viewer.initServer(originalFsimage.getAbsolutePath());
int port = viewer.getPort();
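      // "attr8" has no recognised namespace prefix (e.g. "user."), so the
      // request should be rejected as a bad request.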
URL url = new URL("http://localhost:" + port
+ "/webhdfs/v1/dir1/?op=GETXATTRS&xattr.name=attr8");
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.connect();
assertEquals(HttpURLConnection.HTTP_BAD_REQUEST,
connection.getResponseCode());
url = new URL("http://localhost:" + port
+ "/webhdfs/v1/dir1/?op=GETXATTRS&xattr.name=user.attr1");
connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.connect();
assertEquals(HttpURLConnection.HTTP_OK, connection.getResponseCode());
String content = IOUtils.toString(connection.getInputStream());
assertEquals(attr1JSon, content);
}
}
@Test
public void testWebImageViewerForGetXAttrsWithCodecParameters()
throws Exception {
try (WebImageViewer viewer = new WebImageViewer(
NetUtils.createSocketAddr("localhost:0"))) {
viewer.initServer(originalFsimage.getAbsolutePath());
int port = viewer.getPort();
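      // The namespace prefix is matched case-insensitively ("USER.attr1") and
      // the TEXT encoding returns the same JSON as the previous test.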
URL url = new URL(
"http://localhost:"
+ port
+ "/webhdfs/v1/dir1/?op=GETXATTRS&xattr.name=USER.attr1&encoding=TEXT");
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.connect();
assertEquals(HttpURLConnection.HTTP_OK, connection.getResponseCode());
String content = IOUtils.toString(connection.getInputStream());
assertEquals(attr1JSon, content);
}
}
@Test
public void testWithWebHdfsFileSystem() throws Exception {
try (WebImageViewer viewer = new WebImageViewer(
NetUtils.createSocketAddr("localhost:0"))) {
viewer.initServer(originalFsimage.getAbsolutePath());
int port = viewer.getPort();
// create a WebHdfsFileSystem instance
URI uri = new URI("webhdfs://localhost:" + String.valueOf(port));
Configuration conf = new Configuration();
WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) FileSystem.get(uri, conf);
List<String> names = webhdfs.listXAttrs(new Path("/dir1"));
assertTrue(names.contains("user.attr1"));
assertTrue(names.contains("user.attr2"));
String value = new String(webhdfs.getXAttr(new Path("/dir1"),
"user.attr1"));
assertEquals("value1", value);
value = new String(webhdfs.getXAttr(new Path("/dir1"), "USER.attr1"));
assertEquals("value1", value);
Map<String, byte[]> contentMap = webhdfs.getXAttrs(new Path("/dir1"),
names);
assertEquals("value1", new String(contentMap.get("user.attr1")));
assertEquals("value2", new String(contentMap.get("user.attr2")));
}
}
@Test
public void testResponseCode() throws Exception {
try (WebImageViewer viewer = new WebImageViewer(
NetUtils.createSocketAddr("localhost:0"))) {
viewer.initServer(originalFsimage.getAbsolutePath());
int port = viewer.getPort();
URL url = new URL(
"http://localhost:"
+ port
+ "/webhdfs/v1/dir1/?op=GETXATTRS&xattr.name=user.notpresent&encoding=TEXT");
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.connect();
assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
connection.getResponseCode());
}
}
}
| 9,609 | 35.12782 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.io.RandomAccessFile;
import java.io.StringReader;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URL;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import org.apache.commons.io.output.NullOutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import com.google.common.collect.Maps;
public class TestOfflineImageViewer {
private static final Log LOG = LogFactory.getLog(OfflineImageViewerPB.class);
private static final int NUM_DIRS = 3;
private static final int FILES_PER_DIR = 4;
private static final String TEST_RENEWER = "JobTracker";
private static File originalFsimage = null;
// namespace as written to dfs, to be compared with viewer's output
final static HashMap<String, FileStatus> writtenFiles = Maps.newHashMap();
@Rule
public TemporaryFolder folder = new TemporaryFolder();
// Create a populated namespace for later testing. Save its contents to a
// data structure and store its fsimage location.
// We only want to generate the fsimage file once and use it for
// multiple tests.
@BeforeClass
public static void createOriginalFSImage() throws IOException {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
conf.setLong(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
conf.setLong(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
"RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem hdfs = cluster.getFileSystem();
// Create a reasonable namespace
for (int i = 0; i < NUM_DIRS; i++) {
Path dir = new Path("/dir" + i);
hdfs.mkdirs(dir);
writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
for (int j = 0; j < FILES_PER_DIR; j++) {
Path file = new Path(dir, "file" + j);
FSDataOutputStream o = hdfs.create(file);
o.write(23);
o.close();
writtenFiles.put(file.toString(),
pathToFileEntry(hdfs, file.toString()));
}
}
// Create an empty directory
Path emptydir = new Path("/emptydir");
hdfs.mkdirs(emptydir);
writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));
//Create a directory whose name should be escaped in XML
Path invalidXMLDir = new Path("/dirContainingInvalidXMLChar\u0000here");
hdfs.mkdirs(invalidXMLDir);
// Get delegation tokens so we log the delegation token op
Token<?>[] delegationTokens = hdfs
.addDelegationTokens(TEST_RENEWER, null);
for (Token<?> t : delegationTokens) {
LOG.debug("got token " + t);
}
final Path snapshot = new Path("/snapshot");
hdfs.mkdirs(snapshot);
hdfs.allowSnapshot(snapshot);
hdfs.mkdirs(new Path("/snapshot/1"));
hdfs.delete(snapshot, true);
// Set XAttrs so the fsimage contains XAttr ops
final Path xattr = new Path("/xattr");
hdfs.mkdirs(xattr);
hdfs.setXAttr(xattr, "user.a1", new byte[]{ 0x31, 0x32, 0x33 });
hdfs.setXAttr(xattr, "user.a2", new byte[]{ 0x37, 0x38, 0x39 });
// OIV should be able to handle empty value XAttrs
hdfs.setXAttr(xattr, "user.a3", null);
writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));
// Write results to the fsimage file
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
hdfs.saveNamespace();
// Determine location of fsimage file
originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
if (originalFsimage == null) {
throw new RuntimeException("Didn't generate or can't find fsimage");
}
LOG.debug("original FS image file is " + originalFsimage);
} finally {
if (cluster != null)
cluster.shutdown();
}
}
@AfterClass
public static void deleteOriginalFSImage() throws IOException {
if (originalFsimage != null && originalFsimage.exists()) {
originalFsimage.delete();
}
}
// Convenience method to generate a file status from file system for
// later comparison
private static FileStatus pathToFileEntry(FileSystem hdfs, String file)
throws IOException {
return hdfs.getFileStatus(new Path(file));
}
@Test(expected = IOException.class)
public void testTruncatedFSImage() throws IOException {
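    // Visiting a partial copy of the fsimage should fail with the
    // IOException expected above.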
File truncatedFile = folder.newFile();
PrintStream output = new PrintStream(NullOutputStream.NULL_OUTPUT_STREAM);
copyPartOfFile(originalFsimage, truncatedFile);
new FileDistributionCalculator(new Configuration(), 0, 0, output)
.visit(new RandomAccessFile(truncatedFile, "r"));
}
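  // Copies only the first MAX_BYTES bytes of src into dest, producing the
  // truncated fsimage used by the test above.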
private void copyPartOfFile(File src, File dest) throws IOException {
FileInputStream in = null;
FileOutputStream out = null;
final int MAX_BYTES = 700;
try {
in = new FileInputStream(src);
out = new FileOutputStream(dest);
in.getChannel().transferTo(0, MAX_BYTES, out.getChannel());
} finally {
IOUtils.cleanup(null, in);
IOUtils.cleanup(null, out);
}
}
@Test
public void testFileDistributionCalculator() throws IOException {
ByteArrayOutputStream output = new ByteArrayOutputStream();
PrintStream o = new PrintStream(output);
new FileDistributionCalculator(new Configuration(), 0, 0, o)
.visit(new RandomAccessFile(originalFsimage, "r"));
o.close();
String outputString = output.toString();
Pattern p = Pattern.compile("totalFiles = (\\d+)\n");
Matcher matcher = p.matcher(outputString);
assertTrue(matcher.find() && matcher.groupCount() == 1);
int totalFiles = Integer.parseInt(matcher.group(1));
assertEquals(NUM_DIRS * FILES_PER_DIR, totalFiles);
p = Pattern.compile("totalDirectories = (\\d+)\n");
matcher = p.matcher(outputString);
assertTrue(matcher.find() && matcher.groupCount() == 1);
int totalDirs = Integer.parseInt(matcher.group(1));
    // totalDirs includes the root, empty, invalid-XML-char and xattr directories
assertEquals(NUM_DIRS + 4, totalDirs);
FileStatus maxFile = Collections.max(writtenFiles.values(),
new Comparator<FileStatus>() {
@Override
public int compare(FileStatus first, FileStatus second) {
return first.getLen() < second.getLen() ? -1 :
((first.getLen() == second.getLen()) ? 0 : 1);
}
});
p = Pattern.compile("maxFileSize = (\\d+)\n");
matcher = p.matcher(output.toString("UTF-8"));
assertTrue(matcher.find() && matcher.groupCount() == 1);
assertEquals(maxFile.getLen(), Long.parseLong(matcher.group(1)));
}
@Test
public void testFileDistributionCalculatorWithOptions() throws Exception {
int status = OfflineImageViewerPB.run(new String[] {"-i",
originalFsimage.getAbsolutePath(), "-o", "-", "-p", "FileDistribution",
"-maxSize", "512", "-step", "8"});
assertEquals(0, status);
}
@Test
public void testPBImageXmlWriter() throws IOException, SAXException,
ParserConfigurationException {
ByteArrayOutputStream output = new ByteArrayOutputStream();
PrintStream o = new PrintStream(output);
PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), o);
v.visit(new RandomAccessFile(originalFsimage, "r"));
SAXParserFactory spf = SAXParserFactory.newInstance();
SAXParser parser = spf.newSAXParser();
final String xml = output.toString();
parser.parse(new InputSource(new StringReader(xml)), new DefaultHandler());
}
@Test
public void testWebImageViewer() throws Exception {
WebImageViewer viewer = new WebImageViewer(
NetUtils.createSocketAddr("localhost:0"));
try {
viewer.initServer(originalFsimage.getAbsolutePath());
int port = viewer.getPort();
// create a WebHdfsFileSystem instance
URI uri = new URI("webhdfs://localhost:" + String.valueOf(port));
Configuration conf = new Configuration();
WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)FileSystem.get(uri, conf);
// verify the number of directories
FileStatus[] statuses = webhdfs.listStatus(new Path("/"));
      assertEquals(NUM_DIRS + 3, statuses.length); // also contains the empty, invalid-XML-char and xattr directories
// verify the number of files in the directory
statuses = webhdfs.listStatus(new Path("/dir0"));
assertEquals(FILES_PER_DIR, statuses.length);
// compare a file
FileStatus status = webhdfs.listStatus(new Path("/dir0/file0"))[0];
FileStatus expected = writtenFiles.get("/dir0/file0");
compareFile(expected, status);
// LISTSTATUS operation to an empty directory
statuses = webhdfs.listStatus(new Path("/emptydir"));
assertEquals(0, statuses.length);
      // LISTSTATUS operation to an invalid path
URL url = new URL("http://localhost:" + port +
"/webhdfs/v1/invalid/?op=LISTSTATUS");
verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND, url);
      // LISTSTATUS operation to an invalid prefix
url = new URL("http://localhost:" + port + "/foo");
verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND, url);
// GETFILESTATUS operation
status = webhdfs.getFileStatus(new Path("/dir0/file0"));
compareFile(expected, status);
      // GETFILESTATUS operation to an invalid path
url = new URL("http://localhost:" + port +
"/webhdfs/v1/invalid/?op=GETFILESTATUS");
verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND, url);
// invalid operation
url = new URL("http://localhost:" + port + "/webhdfs/v1/?op=INVALID");
verifyHttpResponseCode(HttpURLConnection.HTTP_BAD_REQUEST, url);
// invalid method
url = new URL("http://localhost:" + port + "/webhdfs/v1/?op=LISTSTATUS");
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("POST");
connection.connect();
assertEquals(HttpURLConnection.HTTP_BAD_METHOD,
connection.getResponseCode());
} finally {
// shutdown the viewer
viewer.close();
}
}
@Test
public void testPBDelimitedWriter() throws IOException, InterruptedException {
testPBDelimitedWriter(""); // Test in memory db.
testPBDelimitedWriter(
new FileSystemTestHelper().getTestRootDir() + "/delimited.db");
}
private void testPBDelimitedWriter(String db)
throws IOException, InterruptedException {
final String DELIMITER = "\t";
ByteArrayOutputStream output = new ByteArrayOutputStream();
try (PrintStream o = new PrintStream(output)) {
PBImageDelimitedTextWriter v =
new PBImageDelimitedTextWriter(o, DELIMITER, db);
v.visit(new RandomAccessFile(originalFsimage, "r"));
}
Set<String> fileNames = new HashSet<>();
try (
ByteArrayInputStream input =
new ByteArrayInputStream(output.toByteArray());
BufferedReader reader =
new BufferedReader(new InputStreamReader(input))) {
String line;
while ((line = reader.readLine()) != null) {
System.out.println(line);
String[] fields = line.split(DELIMITER);
assertEquals(12, fields.length);
fileNames.add(fields[0]);
}
}
// writtenFiles does not contain root directory and "invalid XML char" dir.
for (Iterator<String> it = fileNames.iterator(); it.hasNext(); ) {
String filename = it.next();
if (filename.startsWith("/dirContainingInvalidXMLChar")) {
it.remove();
} else if (filename.equals("/")) {
it.remove();
}
}
assertEquals(writtenFiles.keySet(), fileNames);
}
private static void compareFile(FileStatus expected, FileStatus status) {
assertEquals(expected.getAccessTime(), status.getAccessTime());
assertEquals(expected.getBlockSize(), status.getBlockSize());
assertEquals(expected.getGroup(), status.getGroup());
assertEquals(expected.getLen(), status.getLen());
assertEquals(expected.getModificationTime(),
status.getModificationTime());
assertEquals(expected.getOwner(), status.getOwner());
assertEquals(expected.getPermission(), status.getPermission());
assertEquals(expected.getReplication(), status.getReplication());
assertEquals(expected.isDirectory(), status.isDirectory());
}
private void verifyHttpResponseCode(int expectedCode, URL url)
throws IOException {
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
connection.setRequestMethod("GET");
connection.connect();
assertEquals(expectedCode, connection.getResponseCode());
}
}
| 15,654 | 37.846154 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.namenode.OfflineEditsViewerHelper;
import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.Flags;
import org.apache.hadoop.test.PathUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import com.google.common.collect.ImmutableSet;
public class TestOfflineEditsViewer {
private static final Log LOG = LogFactory
.getLog(TestOfflineEditsViewer.class);
private static final String buildDir = PathUtils
.getTestDirName(TestOfflineEditsViewer.class);
// to create edits and get edits filename
private static final OfflineEditsViewerHelper nnHelper = new OfflineEditsViewerHelper();
private static final ImmutableSet<FSEditLogOpCodes> skippedOps = skippedOps();
@SuppressWarnings("deprecation")
private static ImmutableSet<FSEditLogOpCodes> skippedOps() {
ImmutableSet.Builder<FSEditLogOpCodes> b = ImmutableSet.builder();
// Deprecated opcodes
b.add(FSEditLogOpCodes.OP_DATANODE_ADD)
.add(FSEditLogOpCodes.OP_DATANODE_REMOVE)
.add(FSEditLogOpCodes.OP_SET_NS_QUOTA)
.add(FSEditLogOpCodes.OP_CLEAR_NS_QUOTA)
.add(FSEditLogOpCodes.OP_SET_GENSTAMP_V1);
// Cannot test delegation token related code in insecure set up
b.add(FSEditLogOpCodes.OP_GET_DELEGATION_TOKEN)
.add(FSEditLogOpCodes.OP_RENEW_DELEGATION_TOKEN)
.add(FSEditLogOpCodes.OP_CANCEL_DELEGATION_TOKEN);
// Skip invalid opcode
b.add(FSEditLogOpCodes.OP_INVALID);
return b.build();
}
@Rule
public final TemporaryFolder folder = new TemporaryFolder();
@Before
public void setUp() throws IOException {
nnHelper.startCluster(buildDir + "/dfs/");
}
@After
public void tearDown() throws IOException {
nnHelper.shutdownCluster();
}
/**
* Test the OfflineEditsViewer
*/
@Test
public void testGenerated() throws IOException {
// edits generated by nnHelper (MiniDFSCluster), should have all op codes
// binary, XML, reparsed binary
String edits = nnHelper.generateEdits();
LOG.info("Generated edits=" + edits);
String editsParsedXml = folder.newFile("editsParsed.xml").getAbsolutePath();
String editsReparsed = folder.newFile("editsParsed").getAbsolutePath();
// parse to XML then back to binary
assertEquals(0, runOev(edits, editsParsedXml, "xml", false));
assertEquals(0, runOev(editsParsedXml, editsReparsed, "binary", false));
// judgment time
assertTrue("Edits " + edits + " should have all op codes",
hasAllOpCodes(edits));
LOG.info("Comparing generated file " + editsReparsed
+ " with reference file " + edits);
assertTrue(
"Generated edits and reparsed (bin to XML to bin) should be same",
filesEqualIgnoreTrailingZeros(edits, editsReparsed));
}
@Test
public void testRecoveryMode() throws IOException {
// edits generated by nnHelper (MiniDFSCluster), should have all op codes
// binary, XML, reparsed binary
String edits = nnHelper.generateEdits();
FileOutputStream os = new FileOutputStream(edits, true);
// Corrupt the file by truncating the end
FileChannel editsFile = os.getChannel();
editsFile.truncate(editsFile.size() - 5);
String editsParsedXml = folder.newFile("editsRecoveredParsed.xml")
.getAbsolutePath();
String editsReparsed = folder.newFile("editsRecoveredReparsed")
.getAbsolutePath();
String editsParsedXml2 = folder.newFile("editsRecoveredParsed2.xml")
.getAbsolutePath();
// Can't read the corrupted file without recovery mode
assertEquals(-1, runOev(edits, editsParsedXml, "xml", false));
// parse to XML then back to binary
assertEquals(0, runOev(edits, editsParsedXml, "xml", true));
assertEquals(0, runOev(editsParsedXml, editsReparsed, "binary", false));
assertEquals(0, runOev(editsReparsed, editsParsedXml2, "xml", false));
// judgment time
assertTrue("Test round trip", FileUtils.contentEqualsIgnoreEOL(
new File(editsParsedXml), new File(editsParsedXml2), "UTF-8"));
os.close();
}
@Test
public void testStored() throws IOException {
// reference edits stored with source code (see build.xml)
final String cacheDir = System.getProperty("test.cache.data",
"build/test/cache");
// binary, XML, reparsed binary
String editsStored = cacheDir + "/editsStored";
String editsStoredParsedXml = cacheDir + "/editsStoredParsed.xml";
String editsStoredReparsed = cacheDir + "/editsStoredReparsed";
// reference XML version of editsStored (see build.xml)
String editsStoredXml = cacheDir + "/editsStored.xml";
// parse to XML then back to binary
assertEquals(0, runOev(editsStored, editsStoredParsedXml, "xml", false));
assertEquals(0,
runOev(editsStoredParsedXml, editsStoredReparsed, "binary", false));
// judgement time
assertTrue("Edits " + editsStored + " should have all op codes",
hasAllOpCodes(editsStored));
assertTrue("Reference XML edits and parsed to XML should be same",
FileUtils.contentEqualsIgnoreEOL(new File(editsStoredXml),
new File(editsStoredParsedXml), "UTF-8"));
assertTrue(
"Reference edits and reparsed (bin to XML to bin) should be same",
filesEqualIgnoreTrailingZeros(editsStored, editsStoredReparsed));
}
/**
* Run OfflineEditsViewer
*
* @param inFilename input edits filename
   * @param outFilename output edits filename
   * @param processor processor to apply (e.g. xml, binary, stats)
   * @param recovery whether to run the viewer in recovery mode
   */
private int runOev(String inFilename, String outFilename, String processor,
boolean recovery) throws IOException {
LOG.info("Running oev [" + inFilename + "] [" + outFilename + "]");
OfflineEditsViewer oev = new OfflineEditsViewer();
Flags flags = new Flags();
flags.setPrintToScreen();
if (recovery) {
flags.setRecoveryMode();
}
return oev.go(inFilename, outFilename, processor, flags, null);
}
/**
* Checks that the edits file has all opCodes
*
   * @param inFilename edits file
   * @return true if edits (inFilename) has all opCodes
*/
private boolean hasAllOpCodes(String inFilename) throws IOException {
String outFilename = inFilename + ".stats";
FileOutputStream fout = new FileOutputStream(outFilename);
StatisticsEditsVisitor visitor = new StatisticsEditsVisitor(fout);
OfflineEditsViewer oev = new OfflineEditsViewer();
if (oev.go(inFilename, outFilename, "stats", new Flags(), visitor) != 0)
return false;
LOG.info("Statistics for " + inFilename + "\n"
+ visitor.getStatisticsString());
boolean hasAllOpCodes = true;
for (FSEditLogOpCodes opCode : FSEditLogOpCodes.values()) {
// don't need to test obsolete opCodes
if (skippedOps.contains(opCode))
continue;
Long count = visitor.getStatistics().get(opCode);
if ((count == null) || (count == 0)) {
hasAllOpCodes = false;
LOG.info("Opcode " + opCode + " not tested in " + inFilename);
}
}
return hasAllOpCodes;
}
/**
   * Compare two files, ignoring trailing zeros at the end; for an edits log
   * the trailing zeros do not make any difference. Returns false if the files
   * are not the same.
*
* @param filenameSmall first file to compare (doesn't have to be smaller)
* @param filenameLarge second file to compare (doesn't have to be larger)
*/
private boolean filesEqualIgnoreTrailingZeros(String filenameSmall,
String filenameLarge) throws IOException {
ByteBuffer small = ByteBuffer.wrap(DFSTestUtil.loadFile(filenameSmall));
ByteBuffer large = ByteBuffer.wrap(DFSTestUtil.loadFile(filenameLarge));
// OEV outputs with the latest layout version, so tweak the old file's
// contents to have latest version so checkedin binary files don't
// require frequent updates
small.put(3, (byte)NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
// now correct if it's otherwise
if (small.capacity() > large.capacity()) {
ByteBuffer tmpByteBuffer = small;
small = large;
large = tmpByteBuffer;
String tmpFilename = filenameSmall;
filenameSmall = filenameLarge;
filenameLarge = tmpFilename;
}
// compare from 0 to capacity of small
    // the rest of the large should be padded with OP_INVALID (0xFF) bytes
small.position(0);
small.limit(small.capacity());
large.position(0);
large.limit(small.capacity());
// compares position to limit
if (!small.equals(large)) {
return false;
}
// everything after limit should be 0xFF
int i = large.limit();
large.clear();
for (; i < large.capacity(); i++) {
if (large.get(i) != FSEditLogOpCodes.OP_INVALID.getOpCode()) {
return false;
}
}
return true;
}
}
| 10,242 | 35.451957 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/TestHdfsCryptoStreams.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.crypto;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.crypto.CryptoStreamsTestBase;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.crypto.CryptoFSDataInputStream;
import org.apache.hadoop.fs.crypto.CryptoFSDataOutputStream;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
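/**
 * Runs the shared {@link CryptoStreamsTestBase} tests against crypto streams
 * that wrap files stored in a MiniDFSCluster.
 */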
public class TestHdfsCryptoStreams extends CryptoStreamsTestBase {
private static MiniDFSCluster dfsCluster;
private static FileSystem fs;
private static int pathCount = 0;
private static Path path;
private static Path file;
@BeforeClass
public static void init() throws Exception {
Configuration conf = new HdfsConfiguration();
dfsCluster = new MiniDFSCluster.Builder(conf).build();
dfsCluster.waitClusterUp();
fs = dfsCluster.getFileSystem();
codec = CryptoCodec.getInstance(conf);
}
@AfterClass
public static void shutdown() throws Exception {
if (dfsCluster != null) {
dfsCluster.shutdown();
}
}
@Before
@Override
public void setUp() throws IOException {
++pathCount;
path = new Path("/p" + pathCount);
file = new Path(path, "file");
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0700));
super.setUp();
}
@After
public void cleanUp() throws IOException {
fs.delete(path, true);
}
@Override
protected OutputStream getOutputStream(int bufferSize, byte[] key, byte[] iv)
throws IOException {
return new CryptoFSDataOutputStream(fs.create(file), codec, bufferSize,
key, iv);
}
@Override
protected InputStream getInputStream(int bufferSize, byte[] key, byte[] iv)
throws IOException {
return new CryptoFSDataInputStream(fs.open(file), codec, bufferSize, key,
iv);
}
}
| 2,965 | 31.23913 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.security;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestDelegationToken {
private MiniDFSCluster cluster;
private DelegationTokenSecretManager dtSecretManager;
private Configuration config;
private static final Log LOG = LogFactory.getLog(TestDelegationToken.class);
@Before
public void setUp() throws Exception {
config = new HdfsConfiguration();
config.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
config.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
config.set("hadoop.security.auth_to_local",
"RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
cluster.waitActive();
dtSecretManager = NameNodeAdapter.getDtSecretManager(
cluster.getNamesystem());
}
@After
public void tearDown() throws Exception {
if(cluster!=null) {
cluster.shutdown();
}
}
private Token<DelegationTokenIdentifier> generateDelegationToken(
String owner, String renewer) {
DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(
owner), new Text(renewer), null);
return new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
}
@Test
public void testDelegationTokenSecretManager() throws Exception {
Token<DelegationTokenIdentifier> token = generateDelegationToken(
"SomeUser", "JobTracker");
// Fake renewer should not be able to renew
try {
dtSecretManager.renewToken(token, "FakeRenewer");
Assert.fail("should have failed");
} catch (AccessControlException ace) {
// PASS
}
dtSecretManager.renewToken(token, "JobTracker");
DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
byte[] tokenId = token.getIdentifier();
identifier.readFields(new DataInputStream(
new ByteArrayInputStream(tokenId)));
Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
LOG.info("Sleep to expire the token");
Thread.sleep(6000);
//Token should be expired
try {
dtSecretManager.retrievePassword(identifier);
//Should not come here
Assert.fail("Token should have expired");
} catch (InvalidToken e) {
//Success
}
dtSecretManager.renewToken(token, "JobTracker");
LOG.info("Sleep beyond the max lifetime");
Thread.sleep(5000);
try {
dtSecretManager.renewToken(token, "JobTracker");
Assert.fail("should have been expired");
} catch (InvalidToken it) {
// PASS
}
}
@Test
public void testCancelDelegationToken() throws Exception {
Token<DelegationTokenIdentifier> token = generateDelegationToken(
"SomeUser", "JobTracker");
    //Fake canceller should not be able to cancel
try {
dtSecretManager.cancelToken(token, "FakeCanceller");
Assert.fail("should have failed");
} catch (AccessControlException ace) {
// PASS
}
dtSecretManager.cancelToken(token, "JobTracker");
try {
dtSecretManager.renewToken(token, "JobTracker");
Assert.fail("should have failed");
} catch (InvalidToken it) {
// PASS
}
}
@Test
public void testAddDelegationTokensDFSApi() throws Exception {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("JobTracker");
DistributedFileSystem dfs = cluster.getFileSystem();
Credentials creds = new Credentials();
final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
Assert.assertEquals(1, tokens.length);
Assert.assertEquals(1, creds.numberOfTokens());
checkTokenIdentifier(ugi, tokens[0]);
final Token<?> tokens2[] = dfs.addDelegationTokens("JobTracker", creds);
Assert.assertEquals(0, tokens2.length); // already have token
Assert.assertEquals(1, creds.numberOfTokens());
}
@SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
final String uri = WebHdfsConstants.WEBHDFS_SCHEME + "://"
+ config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
//get file system as JobTracker
final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
"JobTracker", new String[]{"user"});
final WebHdfsFileSystem webhdfs = ugi.doAs(
new PrivilegedExceptionAction<WebHdfsFileSystem>() {
@Override
public WebHdfsFileSystem run() throws Exception {
return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
}
});
{ //test addDelegationTokens(..)
Credentials creds = new Credentials();
final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
Assert.assertEquals(1, tokens.length);
Assert.assertEquals(1, creds.numberOfTokens());
Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
checkTokenIdentifier(ugi, tokens[0]);
final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
Assert.assertEquals(0, tokens2.length);
}
}
@Test
public void testDelegationTokenWithDoAs() throws Exception {
final DistributedFileSystem dfs = cluster.getFileSystem();
final Credentials creds = new Credentials();
final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
Assert.assertEquals(1, tokens.length);
@SuppressWarnings("unchecked")
final Token<DelegationTokenIdentifier> token =
(Token<DelegationTokenIdentifier>) tokens[0];
final UserGroupInformation longUgi = UserGroupInformation
.createRemoteUser("JobTracker/[email protected]");
final UserGroupInformation shortUgi = UserGroupInformation
.createRemoteUser("JobTracker");
longUgi.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
try {
token.renew(config);
} catch (Exception e) {
Assert.fail("Could not renew delegation token for user "+longUgi);
}
return null;
}
});
shortUgi.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
token.renew(config);
return null;
}
});
longUgi.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
try {
token.cancel(config);
} catch (Exception e) {
Assert.fail("Could not cancel delegation token for user "+longUgi);
}
return null;
}
});
}
/**
* Test that the delegation token secret manager only runs when the
* NN is out of safe mode. This is because the secret manager
* has to log to the edit log, which should not be written in
* safe mode. Regression test for HDFS-2579.
*/
@Test
public void testDTManagerInSafeMode() throws Exception {
cluster.startDataNodes(config, 1, true, StartupOption.REGULAR, null);
FileSystem fs = cluster.getFileSystem();
for (int i = 0; i < 5; i++) {
DFSTestUtil.createFile(fs, new Path("/test-" + i), 100, (short)1, 1L);
}
cluster.getConfiguration(0).setInt(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY, 500);
cluster.getConfiguration(0).setInt(
DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 30000);
cluster.setWaitSafeMode(false);
cluster.restartNameNode();
NameNode nn = cluster.getNameNode();
assertTrue(nn.isInSafeMode());
DelegationTokenSecretManager sm =
NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
assertFalse("Secret manager should not run in safe mode", sm.isRunning());
NameNodeAdapter.leaveSafeMode(nn);
assertTrue("Secret manager should start when safe mode is exited",
sm.isRunning());
LOG.info("========= entering safemode again");
NameNodeAdapter.enterSafeMode(nn, false);
assertFalse("Secret manager should stop again when safe mode " +
"is manually entered", sm.isRunning());
// Set the cluster to leave safemode quickly on its own.
cluster.getConfiguration(0).setInt(
DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
cluster.setWaitSafeMode(true);
cluster.restartNameNode();
nn = cluster.getNameNode();
sm = NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
assertFalse(nn.isInSafeMode());
assertTrue(sm.isRunning());
}
@SuppressWarnings("unchecked")
private void checkTokenIdentifier(UserGroupInformation ugi, final Token<?> token)
throws Exception {
Assert.assertNotNull(token);
// should be able to use token.decodeIdentifier() but webhdfs isn't
// registered with the service loader for token decoding
DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
byte[] tokenId = token.getIdentifier();
DataInputStream in = new DataInputStream(new ByteArrayInputStream(tokenId));
try {
identifier.readFields(in);
} finally {
in.close();
}
Assert.assertNotNull(identifier);
LOG.info("A valid token should have non-null password, and should be renewed successfully");
Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
dtSecretManager.renewToken((Token<DelegationTokenIdentifier>) token, "JobTracker");
ugi.doAs(
new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
token.renew(config);
token.cancel(config);
return null;
}
});
}
}
| 12,519 | 38.003115 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.security;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import static org.mockito.Mockito.mock;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SaslInputStream;
import org.apache.hadoop.security.SaslRpcClient;
import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.log4j.Level;
import org.junit.Test;
/** Unit tests for using Delegation Token over RPC. */
public class TestClientProtocolWithDelegationToken {
private static final String ADDRESS = "0.0.0.0";
public static final Log LOG = LogFactory
.getLog(TestClientProtocolWithDelegationToken.class);
private static final Configuration conf;
static {
conf = new Configuration();
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
}
static {
((Log4JLogger) Client.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) Server.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslRpcClient.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslRpcServer.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL);
}
@Test
public void testDelegationTokenRpc() throws Exception {
ClientProtocol mockNN = mock(ClientProtocol.class);
FSNamesystem mockNameSys = mock(FSNamesystem.class);
DelegationTokenSecretManager sm = new DelegationTokenSecretManager(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT,
3600000, mockNameSys);
sm.startThreads();
final Server server = new RPC.Builder(conf)
.setProtocol(ClientProtocol.class).setInstance(mockNN)
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
.setSecretManager(sm).build();
server.start();
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
String user = current.getUserName();
Text owner = new Text(user);
DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(owner, owner, null);
Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
dtId, sm);
SecurityUtil.setTokenService(token, addr);
LOG.info("Service for token is " + token.getService());
current.addToken(token);
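    // With the delegation token in the UGI, the RPC call below should
    // authenticate using the token rather than Kerberos.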
current.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
ClientProtocol proxy = null;
try {
proxy = RPC.getProxy(ClientProtocol.class,
ClientProtocol.versionID, addr, conf);
proxy.getServerDefaults();
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
return null;
}
});
}
}
| 4,766 | 38.725 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.security;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Enumeration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.apache.hadoop.security.TestDoAsEffectiveUser;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.Token;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
public class TestDelegationTokenForProxyUser {
private static MiniDFSCluster cluster;
private static Configuration config;
final private static String GROUP1_NAME = "group1";
final private static String GROUP2_NAME = "group2";
final private static String[] GROUP_NAMES = new String[] { GROUP1_NAME,
GROUP2_NAME };
final private static String REAL_USER = "RealUser";
final private static String PROXY_USER = "ProxyUser";
private static UserGroupInformation ugi;
private static UserGroupInformation proxyUgi;
private static final Log LOG = LogFactory.getLog(TestDoAsEffectiveUser.class);
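  /**
   * Allow the proxy superuser to connect from any local address by listing
   * every local interface address (plus localhost) in the impersonation
   * provider's IP configuration key.
   */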
private static void configureSuperUserIPAddresses(Configuration conf,
String superUserShortName) throws IOException {
ArrayList<String> ipList = new ArrayList<String>();
Enumeration<NetworkInterface> netInterfaceList = NetworkInterface
.getNetworkInterfaces();
while (netInterfaceList.hasMoreElements()) {
NetworkInterface inf = netInterfaceList.nextElement();
Enumeration<InetAddress> addrList = inf.getInetAddresses();
while (addrList.hasMoreElements()) {
InetAddress addr = addrList.nextElement();
ipList.add(addr.getHostAddress());
}
}
StringBuilder builder = new StringBuilder();
for (String ip : ipList) {
builder.append(ip);
builder.append(',');
}
builder.append("127.0.1.1,");
builder.append(InetAddress.getLocalHost().getCanonicalHostName());
LOG.info("Local Ip addresses: " + builder.toString());
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(superUserShortName),
builder.toString());
}
@BeforeClass
public static void setUp() throws Exception {
config = new HdfsConfiguration();
config.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
config.setLong(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
config.setLong(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
config.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER),
"group1");
config.setBoolean(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
configureSuperUserIPAddresses(config, REAL_USER);
FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
cluster = new MiniDFSCluster.Builder(config).build();
cluster.waitActive();
ProxyUsers.refreshSuperUserGroupsConfiguration(config);
ugi = UserGroupInformation.createRemoteUser(REAL_USER);
proxyUgi = UserGroupInformation.createProxyUserForTesting(PROXY_USER, ugi,
GROUP_NAMES);
}
@AfterClass
public static void tearDown() throws Exception {
if(cluster!=null) {
cluster.shutdown();
}
}
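  /**
   * A delegation token obtained while impersonating should record both the
   * effective (proxy) user and the real user in its identifier.
   */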
@Test(timeout=20000)
public void testDelegationTokenWithRealUser() throws IOException {
try {
Token<?>[] tokens = proxyUgi
.doAs(new PrivilegedExceptionAction<Token<?>[]>() {
@Override
public Token<?>[] run() throws IOException {
return cluster.getFileSystem().addDelegationTokens("RenewerUser", null);
}
});
DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
byte[] tokenId = tokens[0].getIdentifier();
identifier.readFields(new DataInputStream(new ByteArrayInputStream(
tokenId)));
Assert.assertEquals(identifier.getUser().getUserName(), PROXY_USER);
Assert.assertEquals(identifier.getUser().getRealUser().getUserName(),
REAL_USER);
} catch (InterruptedException e) {
      // Ignore; the doAs call is not expected to be interrupted.
}
}
@Test(timeout=5000)
public void testWebHdfsDoAs() throws Exception {
WebHdfsTestUtil.LOG.info("START: testWebHdfsDoAs()");
WebHdfsTestUtil.LOG.info("ugi.getShortUserName()=" + ugi.getShortUserName());
final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config, WebHdfsConstants.WEBHDFS_SCHEME);
final Path root = new Path("/");
cluster.getFileSystem().setPermission(root, new FsPermission((short)0777));
Whitebox.setInternalState(webhdfs, "ugi", proxyUgi);
{
Path responsePath = webhdfs.getHomeDirectory();
WebHdfsTestUtil.LOG.info("responsePath=" + responsePath);
Assert.assertEquals(webhdfs.getUri() + "/user/" + PROXY_USER, responsePath.toString());
}
final Path f = new Path("/testWebHdfsDoAs/a.txt");
{
FSDataOutputStream out = webhdfs.create(f);
out.write("Hello, webhdfs user!".getBytes());
out.close();
final FileStatus status = webhdfs.getFileStatus(f);
WebHdfsTestUtil.LOG.info("status.getOwner()=" + status.getOwner());
Assert.assertEquals(PROXY_USER, status.getOwner());
}
{
final FSDataOutputStream out = webhdfs.append(f);
out.write("\nHello again!".getBytes());
out.close();
final FileStatus status = webhdfs.getFileStatus(f);
WebHdfsTestUtil.LOG.info("status.getOwner()=" + status.getOwner());
WebHdfsTestUtil.LOG.info("status.getLen() =" + status.getLen());
Assert.assertEquals(PROXY_USER, status.getOwner());
}
}
}
| 7,611 | 38.853403 | 123 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.security.token.block;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.EnumSet;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.io.TestWritable;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SaslInputStream;
import org.apache.hadoop.security.SaslRpcClient;
import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.protobuf.BlockingService;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/** Unit tests for block tokens */
public class TestBlockToken {
public static final Log LOG = LogFactory.getLog(TestBlockToken.class);
private static final String ADDRESS = "0.0.0.0";
static {
((Log4JLogger) Client.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) Server.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslRpcClient.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslRpcServer.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL);
}
/** Directory where we can count our open file descriptors under Linux */
static final File FD_DIR = new File("/proc/self/fd/");
final long blockKeyUpdateInterval = 10 * 60 * 1000; // 10 mins
final long blockTokenLifetime = 2 * 60 * 1000; // 2 mins
final ExtendedBlock block1 = new ExtendedBlock("0", 0L);
final ExtendedBlock block2 = new ExtendedBlock("10", 10L);
final ExtendedBlock block3 = new ExtendedBlock("-10", -108L);
@Before
public void disableKerberos() {
Configuration conf = new Configuration();
conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
UserGroupInformation.setConfiguration(conf);
}
private static class GetLengthAnswer implements
Answer<GetReplicaVisibleLengthResponseProto> {
final BlockTokenSecretManager sm;
final BlockTokenIdentifier ident;
public GetLengthAnswer(BlockTokenSecretManager sm,
BlockTokenIdentifier ident) {
this.sm = sm;
this.ident = ident;
}
@Override
public GetReplicaVisibleLengthResponseProto answer(
InvocationOnMock invocation) throws IOException {
Object args[] = invocation.getArguments();
assertEquals(2, args.length);
GetReplicaVisibleLengthRequestProto req =
(GetReplicaVisibleLengthRequestProto) args[1];
Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
.getTokenIdentifiers();
assertEquals("Only one BlockTokenIdentifier expected", 1, tokenIds.size());
long result = 0;
for (TokenIdentifier tokenId : tokenIds) {
BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
LOG.info("Got: " + id.toString());
assertTrue("Received BlockTokenIdentifier is wrong", ident.equals(id));
sm.checkAccess(id, null, PBHelper.convert(req.getBlock()),
BlockTokenIdentifier.AccessMode.WRITE);
result = id.getBlockId();
}
return GetReplicaVisibleLengthResponseProto.newBuilder()
.setLength(result).build();
}
}
private BlockTokenIdentifier generateTokenId(BlockTokenSecretManager sm,
ExtendedBlock block,
EnumSet<BlockTokenIdentifier.AccessMode> accessModes)
throws IOException {
Token<BlockTokenIdentifier> token = sm.generateToken(block, accessModes);
BlockTokenIdentifier id = sm.createIdentifier();
id.readFields(new DataInputStream(new ByteArrayInputStream(token
.getIdentifier())));
return id;
}
@Test
public void testWritable() throws Exception {
TestWritable.testWritable(new BlockTokenIdentifier());
BlockTokenSecretManager sm = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
TestWritable.testWritable(generateTokenId(sm, block1,
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class)));
TestWritable.testWritable(generateTokenId(sm, block2,
EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE)));
TestWritable.testWritable(generateTokenId(sm, block3,
EnumSet.noneOf(BlockTokenIdentifier.AccessMode.class)));
}
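  /**
   * Generate tokens from both secret managers and verify that each token is
   * accepted by both the master and the slave, for single-mode as well as
   * multi-mode access.
   */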
private void tokenGenerationAndVerification(BlockTokenSecretManager master,
BlockTokenSecretManager slave) throws Exception {
// single-mode tokens
for (BlockTokenIdentifier.AccessMode mode : BlockTokenIdentifier.AccessMode
.values()) {
// generated by master
Token<BlockTokenIdentifier> token1 = master.generateToken(block1,
EnumSet.of(mode));
master.checkAccess(token1, null, block1, mode);
slave.checkAccess(token1, null, block1, mode);
// generated by slave
Token<BlockTokenIdentifier> token2 = slave.generateToken(block2,
EnumSet.of(mode));
master.checkAccess(token2, null, block2, mode);
slave.checkAccess(token2, null, block2, mode);
}
// multi-mode tokens
Token<BlockTokenIdentifier> mtoken = master.generateToken(block3,
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));
for (BlockTokenIdentifier.AccessMode mode : BlockTokenIdentifier.AccessMode
.values()) {
master.checkAccess(mtoken, null, block3, mode);
slave.checkAccess(mtoken, null, block3, mode);
}
}
/** test block key and token handling */
@Test
public void testBlockTokenSecretManager() throws Exception {
BlockTokenSecretManager masterHandler = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
BlockTokenSecretManager slaveHandler = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, "fake-pool", null);
ExportedBlockKeys keys = masterHandler.exportKeys();
slaveHandler.addKeys(keys);
tokenGenerationAndVerification(masterHandler, slaveHandler);
// key updating
masterHandler.updateKeys();
tokenGenerationAndVerification(masterHandler, slaveHandler);
keys = masterHandler.exportKeys();
slaveHandler.addKeys(keys);
tokenGenerationAndVerification(masterHandler, slaveHandler);
}
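  /**
   * Build an RPC server backed by a mock datanode protocol that checks the
   * caller's block token before answering getReplicaVisibleLength.
   */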
private static Server createMockDatanode(BlockTokenSecretManager sm,
Token<BlockTokenIdentifier> token, Configuration conf)
throws IOException, ServiceException {
ClientDatanodeProtocolPB mockDN = mock(ClientDatanodeProtocolPB.class);
BlockTokenIdentifier id = sm.createIdentifier();
id.readFields(new DataInputStream(new ByteArrayInputStream(token
.getIdentifier())));
doAnswer(new GetLengthAnswer(sm, id)).when(mockDN)
.getReplicaVisibleLength(any(RpcController.class),
any(GetReplicaVisibleLengthRequestProto.class));
RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class,
ProtobufRpcEngine.class);
BlockingService service = ClientDatanodeProtocolService
.newReflectiveBlockingService(mockDN);
return new RPC.Builder(conf).setProtocol(ClientDatanodeProtocolPB.class)
.setInstance(service).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
}
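  /**
   * A client holding a valid block token should be able to call
   * getReplicaVisibleLength on the mock datanode over token-authenticated RPC.
   */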
@Test
public void testBlockTokenRpc() throws Exception {
Configuration conf = new Configuration();
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
BlockTokenSecretManager sm = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
Token<BlockTokenIdentifier> token = sm.generateToken(block3,
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));
final Server server = createMockDatanode(sm, token, conf);
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
final UserGroupInformation ticket = UserGroupInformation
.createRemoteUser(block3.toString());
ticket.addToken(token);
ClientDatanodeProtocol proxy = null;
try {
proxy = DFSUtil.createClientDatanodeProtocolProxy(addr, ticket, conf,
NetUtils.getDefaultSocketFactory(conf));
assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
/**
* Test that fast repeated invocations of createClientDatanodeProtocolProxy
* will not end up using up thousands of sockets. This is a regression test
* for HDFS-1965.
*/
@Test
public void testBlockTokenRpcLeak() throws Exception {
Configuration conf = new Configuration();
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
Assume.assumeTrue(FD_DIR.exists());
BlockTokenSecretManager sm = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
Token<BlockTokenIdentifier> token = sm.generateToken(block3,
EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));
final Server server = createMockDatanode(sm, token, conf);
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
fakeBlock.setBlockToken(token);
// Create another RPC proxy with the same configuration - this will never
// attempt to connect anywhere -- but it causes the refcount on the
// RPC "Client" object to stay above 0 such that RPC.stopProxy doesn't
// actually close the TCP connections to the real target DN.
ClientDatanodeProtocol proxyToNoWhere = RPC.getProxy(
ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
new InetSocketAddress("1.1.1.1", 1),
UserGroupInformation.createRemoteUser("junk"), conf,
NetUtils.getDefaultSocketFactory(conf));
ClientDatanodeProtocol proxy = null;
int fdsAtStart = countOpenFileDescriptors();
try {
long endTime = Time.now() + 3000;
while (Time.now() < endTime) {
proxy = DFSUtil.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000,
false, fakeBlock);
assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
if (proxy != null) {
RPC.stopProxy(proxy);
}
LOG.info("Num open fds:" + countOpenFileDescriptors());
}
int fdsAtEnd = countOpenFileDescriptors();
if (fdsAtEnd - fdsAtStart > 50) {
fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
}
} finally {
server.stop();
}
RPC.stopProxy(proxyToNoWhere);
}
/**
* @return the current number of file descriptors open by this process.
*/
private static int countOpenFileDescriptors() {
return FD_DIR.list().length;
}
/**
* Test {@link BlockPoolTokenSecretManager}
*/
@Test
public void testBlockPoolTokenSecretManager() throws Exception {
BlockPoolTokenSecretManager bpMgr = new BlockPoolTokenSecretManager();
    // Test BlockPoolTokenSecretManager with up to 10 block pools
for (int i = 0; i < 10; i++) {
String bpid = Integer.toString(i);
BlockTokenSecretManager masterHandler = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
BlockTokenSecretManager slaveHandler = new BlockTokenSecretManager(
blockKeyUpdateInterval, blockTokenLifetime, "fake-pool", null);
bpMgr.addBlockPool(bpid, slaveHandler);
ExportedBlockKeys keys = masterHandler.exportKeys();
bpMgr.addKeys(bpid, keys);
tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid));
// Test key updating
masterHandler.updateKeys();
tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid));
keys = masterHandler.exportKeys();
bpMgr.addKeys(bpid, keys);
tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid));
}
}
/**
* This test writes a file and gets the block locations without closing the
   * file, and tests the block token in the last block. The block token is
   * verified by ensuring it is of the correct kind.
*
* @throws IOException
* @throws InterruptedException
*/
@Test
public void testBlockTokenInLastLocatedBlock() throws IOException,
InterruptedException {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1).build();
cluster.waitActive();
try {
FileSystem fs = cluster.getFileSystem();
String fileName = "/testBlockTokenInLastLocatedBlock";
Path filePath = new Path(fileName);
FSDataOutputStream out = fs.create(filePath, (short) 1);
out.write(new byte[1000]);
// ensure that the first block is written out (see FSOutputSummer#flush)
out.flush();
LocatedBlocks locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(
fileName, 0, 1000);
while (locatedBlocks.getLastLocatedBlock() == null) {
Thread.sleep(100);
locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(fileName, 0,
1000);
}
Token<BlockTokenIdentifier> token = locatedBlocks.getLastLocatedBlock()
.getBlockToken();
Assert.assertEquals(BlockTokenIdentifier.KIND_NAME, token.getKind());
out.close();
} finally {
cluster.shutdown();
}
}
}
| 16,779 | 39.433735 | 111 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/SecurityTestUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.security.token.block;
import java.io.IOException;
import org.apache.hadoop.security.token.Token;
/** Utilities for security tests */
public class SecurityTestUtil {
/**
   * Check whether a block access token is expired. Returns true when the
   * token is expired, false otherwise.
*/
public static boolean isBlockTokenExpired(Token<BlockTokenIdentifier> token)
throws IOException {
return BlockTokenSecretManager.isTokenExpired(token);
}
/**
* set access token lifetime.
*/
public static void setBlockTokenLifetime(BlockTokenSecretManager handler,
long tokenLifetime) {
handler.setTokenLifetime(tokenLifetime);
}
}
| 1,487 | 31.347826 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.client.impl;
import static org.junit.Assert.assertSame;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.common.base.Supplier;
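/** Unit tests for the client-side lease renewal daemon (LeaseRenewer). */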
public class TestLeaseRenewer {
  private final String FAKE_AUTHORITY = "hdfs://nn1/";
private final UserGroupInformation FAKE_UGI_A =
UserGroupInformation.createUserForTesting(
"myuser", new String[]{"group1"});
private final UserGroupInformation FAKE_UGI_B =
UserGroupInformation.createUserForTesting(
"myuser", new String[]{"group1"});
private DFSClient MOCK_DFSCLIENT;
private LeaseRenewer renewer;
/** Cause renewals often so test runs quickly. */
private static final long FAST_GRACE_PERIOD = 100L;
@Before
public void setupMocksAndRenewer() throws IOException {
MOCK_DFSCLIENT = createMockClient();
renewer = LeaseRenewer.getInstance(
FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
renewer.setGraceSleepPeriod(FAST_GRACE_PERIOD);
}
private DFSClient createMockClient() {
final DfsClientConf mockConf = Mockito.mock(DfsClientConf.class);
Mockito.doReturn((int)FAST_GRACE_PERIOD).when(mockConf).getHdfsTimeout();
DFSClient mock = Mockito.mock(DFSClient.class);
Mockito.doReturn(true).when(mock).isClientRunning();
Mockito.doReturn(mockConf).when(mock).getConf();
Mockito.doReturn("myclient").when(mock).getClientName();
return mock;
}
@Test
public void testInstanceSharing() throws IOException {
// Two lease renewers with the same UGI should return
// the same instance
LeaseRenewer lr = LeaseRenewer.getInstance(
FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
LeaseRenewer lr2 = LeaseRenewer.getInstance(
FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
Assert.assertSame(lr, lr2);
// But a different UGI should return a different instance
LeaseRenewer lr3 = LeaseRenewer.getInstance(
FAKE_AUTHORITY, FAKE_UGI_B, MOCK_DFSCLIENT);
Assert.assertNotSame(lr, lr3);
// A different authority with same UGI should also be a different
// instance.
LeaseRenewer lr4 = LeaseRenewer.getInstance(
"someOtherAuthority", FAKE_UGI_B, MOCK_DFSCLIENT);
Assert.assertNotSame(lr, lr4);
Assert.assertNotSame(lr3, lr4);
}
@Test
public void testRenewal() throws Exception {
// Keep track of how many times the lease gets renewed
final AtomicInteger leaseRenewalCount = new AtomicInteger();
Mockito.doAnswer(new Answer<Boolean>() {
@Override
public Boolean answer(InvocationOnMock invocation) throws Throwable {
leaseRenewalCount.incrementAndGet();
return true;
}
}).when(MOCK_DFSCLIENT).renewLease();
// Set up a file so that we start renewing our lease.
DFSOutputStream mockStream = Mockito.mock(DFSOutputStream.class);
long fileId = 123L;
renewer.put(fileId, mockStream, MOCK_DFSCLIENT);
// Wait for lease to get renewed
long failTime = Time.monotonicNow() + 5000;
while (Time.monotonicNow() < failTime &&
leaseRenewalCount.get() == 0) {
Thread.sleep(50);
}
if (leaseRenewalCount.get() == 0) {
Assert.fail("Did not renew lease at all!");
}
renewer.closeFile(fileId, MOCK_DFSCLIENT);
}
/**
* Regression test for HDFS-2810. In this bug, the LeaseRenewer has handles
* to several DFSClients with the same name, the first of which has no files
* open. Previously, this was causing the lease to not get renewed.
*/
@Test
public void testManyDfsClientsWhereSomeNotOpen() throws Exception {
// First DFSClient has no files open so doesn't renew leases.
final DFSClient mockClient1 = createMockClient();
Mockito.doReturn(false).when(mockClient1).renewLease();
assertSame(renewer, LeaseRenewer.getInstance(
FAKE_AUTHORITY, FAKE_UGI_A, mockClient1));
// Set up a file so that we start renewing our lease.
DFSOutputStream mockStream1 = Mockito.mock(DFSOutputStream.class);
long fileId = 456L;
renewer.put(fileId, mockStream1, mockClient1);
// Second DFSClient does renew lease
final DFSClient mockClient2 = createMockClient();
Mockito.doReturn(true).when(mockClient2).renewLease();
assertSame(renewer, LeaseRenewer.getInstance(
FAKE_AUTHORITY, FAKE_UGI_A, mockClient2));
// Set up a file so that we start renewing our lease.
DFSOutputStream mockStream2 = Mockito.mock(DFSOutputStream.class);
renewer.put(fileId, mockStream2, mockClient2);
// Wait for lease to get renewed
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
Mockito.verify(mockClient1, Mockito.atLeastOnce()).renewLease();
Mockito.verify(mockClient2, Mockito.atLeastOnce()).renewLease();
return true;
} catch (AssertionError err) {
LeaseRenewer.LOG.warn("Not yet satisfied", err);
return false;
} catch (IOException e) {
// should not throw!
throw new RuntimeException(e);
}
}
}, 100, 10000);
renewer.closeFile(fileId, mockClient1);
renewer.closeFile(fileId, mockClient2);
}
@Test
public void testThreadName() throws Exception {
DFSOutputStream mockStream = Mockito.mock(DFSOutputStream.class);
long fileId = 789L;
Assert.assertFalse("Renewer not initially running",
renewer.isRunning());
// Pretend to open a file
renewer.put(fileId, mockStream, MOCK_DFSCLIENT);
Assert.assertTrue("Renewer should have started running",
renewer.isRunning());
// Check the thread name is reasonable
String threadName = renewer.getDaemonName();
Assert.assertEquals("LeaseRenewer:myuser@hdfs://nn1/", threadName);
// Pretend to close the file
renewer.closeFile(fileId, MOCK_DFSCLIENT);
renewer.setEmptyTime(Time.monotonicNow());
// Should stop the renewer running within a few seconds
long failTime = Time.monotonicNow() + 5000;
while (renewer.isRunning() && Time.monotonicNow() < failTime) {
Thread.sleep(50);
}
Assert.assertFalse(renewer.isRunning());
}
}
| 7,454 | 34.5 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestAuthFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import javax.servlet.FilterConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.junit.Assert;
import org.junit.Test;
public class TestAuthFilter {
private static class DummyFilterConfig implements FilterConfig {
final Map<String, String> map;
DummyFilterConfig(Map<String,String> map) {
this.map = map;
}
@Override
public String getFilterName() {
return "dummy";
}
@Override
public String getInitParameter(String arg0) {
return map.get(arg0);
}
@Override
public Enumeration<String> getInitParameterNames() {
return Collections.enumeration(map.keySet());
}
@Override
public ServletContext getServletContext() {
return null;
}
}
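  /**
   * The WebHDFS Kerberos principal and keytab settings should be mapped onto
   * the authentication handler's properties, with anonymous access allowed by
   * default.
   */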
@Test
public void testGetConfiguration() throws ServletException {
AuthFilter filter = new AuthFilter();
Map<String, String> m = new HashMap<String,String>();
m.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
"xyz/thehost@REALM");
m.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
"thekeytab");
FilterConfig config = new DummyFilterConfig(m);
Properties p = filter.getConfiguration("random", config);
Assert.assertEquals("xyz/thehost@REALM",
p.getProperty("kerberos.principal"));
Assert.assertEquals("thekeytab", p.getProperty("kerberos.keytab"));
Assert.assertEquals("true",
p.getProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED));
}
@Test
public void testGetSimpleAuthDisabledConfiguration() throws ServletException {
AuthFilter filter = new AuthFilter();
Map<String, String> m = new HashMap<String,String>();
m.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_SIMPLE_ANONYMOUS_ALLOWED,
"false");
FilterConfig config = new DummyFilterConfig(m);
Properties p = filter.getConfiguration("random", config);
Assert.assertEquals("false",
p.getProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED));
}
@Test
public void testGetSimpleAuthDefaultConfiguration() throws ServletException {
AuthFilter filter = new AuthFilter();
Map<String, String> m = new HashMap<String,String>();
FilterConfig config = new DummyFilterConfig(m);
Properties p = filter.getConfiguration("random", config);
Assert.assertEquals("true",
p.getProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED));
}
}
| 3,563 | 33.941176 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Assert;
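/** Test helpers for creating WebHDFS file systems and building WebHDFS URLs. */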
public class WebHdfsTestUtil {
public static final Log LOG = LogFactory.getLog(WebHdfsTestUtil.class);
public static Configuration createConf() {
final Configuration conf = new Configuration();
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
return conf;
}
public static WebHdfsFileSystem getWebHdfsFileSystem(
final Configuration conf, String scheme) throws IOException,
URISyntaxException {
final String uri;
if (WebHdfsConstants.WEBHDFS_SCHEME.equals(scheme)) {
uri = WebHdfsConstants.WEBHDFS_SCHEME + "://"
+ conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
} else if (WebHdfsConstants.SWEBHDFS_SCHEME.equals(scheme)) {
uri = WebHdfsConstants.SWEBHDFS_SCHEME + "://"
+ conf.get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
} else {
throw new IllegalArgumentException("unknown scheme:" + scheme);
}
return (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
}
public static WebHdfsFileSystem getWebHdfsFileSystemAs(
final UserGroupInformation ugi, final Configuration conf
) throws IOException, InterruptedException {
return getWebHdfsFileSystemAs(ugi, conf, WebHdfsConstants.WEBHDFS_SCHEME);
}
  public static WebHdfsFileSystem getWebHdfsFileSystemAs(
      final UserGroupInformation ugi, final Configuration conf,
      final String scheme) throws IOException, InterruptedException {
    return ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
      @Override
      public WebHdfsFileSystem run() throws Exception {
        // Use the requested scheme rather than always defaulting to webhdfs.
        return getWebHdfsFileSystem(conf, scheme);
      }
    });
  }
public static URL toUrl(final WebHdfsFileSystem webhdfs,
final HttpOpParam.Op op, final Path fspath,
final Param<?,?>... parameters) throws IOException {
final URL url = webhdfs.toUrl(op, fspath, parameters);
WebHdfsTestUtil.LOG.info("url=" + url);
return url;
}
public static Map<?, ?> connectAndGetJson(final HttpURLConnection conn,
final int expectedResponseCode) throws IOException {
conn.connect();
Assert.assertEquals(expectedResponseCode, conn.getResponseCode());
return WebHdfsFileSystem.jsonParse(conn, false);
}
}
| 3,801 | 37.795918 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestOffsetUrlInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.net.URL;
import org.junit.Test;
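/** Tests that the offset query parameter is stripped from WebHDFS URLs. */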
public class TestOffsetUrlInputStream {
@Test
public void testRemoveOffset() throws IOException {
{ //no offset
String s = "http://test/Abc?Length=99";
assertEquals(s, WebHdfsFileSystem.removeOffsetParam(new URL(s)).toString());
}
{ //no parameters
String s = "http://test/Abc";
assertEquals(s, WebHdfsFileSystem.removeOffsetParam(new URL(s)).toString());
}
{ //offset as first parameter
String s = "http://test/Abc?offset=10&Length=99";
assertEquals("http://test/Abc?Length=99",
WebHdfsFileSystem.removeOffsetParam(new URL(s)).toString());
}
{ //offset as second parameter
String s = "http://test/Abc?op=read&OFFset=10&Length=99";
assertEquals("http://test/Abc?op=read&Length=99",
WebHdfsFileSystem.removeOffsetParam(new URL(s)).toString());
}
{ //offset as last parameter
String s = "http://test/Abc?Length=99&offset=10";
assertEquals("http://test/Abc?Length=99",
WebHdfsFileSystem.removeOffsetParam(new URL(s)).toString());
}
{ //offset as the only parameter
String s = "http://test/Abc?offset=10";
assertEquals("http://test/Abc",
WebHdfsFileSystem.removeOffsetParam(new URL(s)).toString());
}
}
}
| 2,236 | 33.415385 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpDelegationToken.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
public class TestHftpDelegationToken {
/**
   * Test whether HftpFileSystem maintains wire-compatibility with 0.20.203
   * when obtaining a delegation token. See HDFS-5440 for more details.
*/
@Test
public void testTokenCompatibilityFor203() throws IOException,
URISyntaxException, AuthenticationException {
Configuration conf = new Configuration();
HftpFileSystem fs = new HftpFileSystem();
Token<?> token = new Token<TokenIdentifier>(new byte[0], new byte[0],
DelegationTokenIdentifier.HDFS_DELEGATION_KIND, new Text(
"127.0.0.1:8020"));
Credentials cred = new Credentials();
cred.addToken(WebHdfsConstants.HFTP_TOKEN_KIND, token);
ByteArrayOutputStream os = new ByteArrayOutputStream();
cred.write(new DataOutputStream(os));
HttpURLConnection conn = mock(HttpURLConnection.class);
doReturn(new ByteArrayInputStream(os.toByteArray())).when(conn)
.getInputStream();
doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode();
URLConnectionFactory factory = mock(URLConnectionFactory.class);
doReturn(conn).when(factory).openConnection(Mockito.<URL> any(),
anyBoolean());
final URI uri = new URI("hftp://127.0.0.1:8020");
fs.initialize(uri, conf);
fs.connectionFactory = factory;
UserGroupInformation ugi = UserGroupInformation.createUserForTesting("foo",
new String[] { "bar" });
TokenAspect<HftpFileSystem> tokenAspect = new TokenAspect<HftpFileSystem>(
fs, SecurityUtil.buildTokenService(uri), WebHdfsConstants.HFTP_TOKEN_KIND);
tokenAspect.initDelegationToken(ugi);
tokenAspect.ensureTokenInitialized();
Assert.assertSame(WebHdfsConstants.HFTP_TOKEN_KIND, fs.getRenewToken().getKind());
Token<?> tok = (Token<?>) Whitebox.getInternalState(fs, "delegationToken");
Assert.assertNotSame("Not making a copy of the remote token", token, tok);
Assert.assertEquals(token.getKind(), tok.getKind());
}
}
| 3,829 | 38.484536 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestByteRangeInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Mockito.CALLS_REAL_METHODS;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import com.google.common.net.HttpHeaders;
import org.apache.hadoop.hdfs.web.ByteRangeInputStream.InputStreamAndFileLength;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
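/** Unit tests for seeking, reconnection and close propagation in ByteRangeInputStream. */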
public class TestByteRangeInputStream {
private class ByteRangeInputStreamImpl extends ByteRangeInputStream {
public ByteRangeInputStreamImpl(URLOpener o, URLOpener r)
throws IOException {
super(o, r);
}
@Override
protected URL getResolvedUrl(HttpURLConnection connection)
throws IOException {
return new URL("http://resolvedurl/");
}
}
private ByteRangeInputStream.URLOpener getMockURLOpener(URL url)
throws IOException {
ByteRangeInputStream.URLOpener opener =
mock(ByteRangeInputStream.URLOpener.class, CALLS_REAL_METHODS);
opener.setURL(url);
doReturn(getMockConnection("65535"))
.when(opener).connect(anyLong(), anyBoolean());
return opener;
}
private HttpURLConnection getMockConnection(String length)
throws IOException {
HttpURLConnection mockConnection = mock(HttpURLConnection.class);
doReturn(new ByteArrayInputStream("asdf".getBytes()))
.when(mockConnection).getInputStream();
doReturn(length).when(mockConnection)
.getHeaderField(HttpHeaders.CONTENT_LENGTH);
return mockConnection;
}
@Test
public void testByteRange() throws IOException {
ByteRangeInputStream.URLOpener oMock = getMockURLOpener(
new URL("http://test"));
ByteRangeInputStream.URLOpener rMock = getMockURLOpener(null);
ByteRangeInputStream bris = new ByteRangeInputStreamImpl(oMock, rMock);
bris.seek(0);
assertEquals("getPos wrong", 0, bris.getPos());
bris.read();
assertEquals("Initial call made incorrectly (offset check)",
0, bris.startPos);
assertEquals("getPos should return 1 after reading one byte", 1,
bris.getPos());
verify(oMock, times(1)).connect(0, false);
bris.read();
assertEquals("getPos should return 2 after reading two bytes", 2,
bris.getPos());
// No additional connections should have been made (no seek)
verify(oMock, times(1)).connect(0, false);
rMock.setURL(new URL("http://resolvedurl/"));
bris.seek(100);
bris.read();
assertEquals("Seek to 100 bytes made incorrectly (offset Check)",
100, bris.startPos);
assertEquals("getPos should return 101 after reading one byte", 101,
bris.getPos());
verify(rMock, times(1)).connect(100, true);
bris.seek(101);
bris.read();
// Seek to 101 should not result in another request
verify(rMock, times(1)).connect(100, true);
verify(rMock, times(0)).connect(101, true);
bris.seek(2500);
bris.read();
assertEquals("Seek to 2500 bytes made incorrectly (offset Check)",
2500, bris.startPos);
doReturn(getMockConnection(null))
.when(rMock).connect(anyLong(), anyBoolean());
bris.seek(500);
try {
bris.read();
fail("Exception should be thrown when content-length is not given");
} catch (IOException e) {
assertTrue("Incorrect response message: " + e.getMessage(),
e.getMessage().startsWith(HttpHeaders.CONTENT_LENGTH +
" is missing: "));
}
bris.close();
}
@Test
public void testPropagatedClose() throws IOException {
ByteRangeInputStream bris =
mock(ByteRangeInputStream.class, CALLS_REAL_METHODS);
InputStreamAndFileLength mockStream = new InputStreamAndFileLength(1L,
mock(InputStream.class));
doReturn(mockStream).when(bris).openInputStream(Mockito.anyLong());
Whitebox.setInternalState(bris, "status",
ByteRangeInputStream.StreamStatus.SEEK);
int brisOpens = 0;
int brisCloses = 0;
int isCloses = 0;
// first open, shouldn't close underlying stream
bris.getInputStream();
verify(bris, times(++brisOpens)).openInputStream(Mockito.anyLong());
verify(bris, times(brisCloses)).close();
verify(mockStream.in, times(isCloses)).close();
// stream is open, shouldn't close underlying stream
bris.getInputStream();
verify(bris, times(brisOpens)).openInputStream(Mockito.anyLong());
verify(bris, times(brisCloses)).close();
verify(mockStream.in, times(isCloses)).close();
// seek forces a reopen, should close underlying stream
bris.seek(1);
bris.getInputStream();
verify(bris, times(++brisOpens)).openInputStream(Mockito.anyLong());
verify(bris, times(brisCloses)).close();
verify(mockStream.in, times(++isCloses)).close();
// verify that the underlying stream isn't closed after a seek
// ie. the state was correctly updated
bris.getInputStream();
verify(bris, times(brisOpens)).openInputStream(Mockito.anyLong());
verify(bris, times(brisCloses)).close();
verify(mockStream.in, times(isCloses)).close();
// seeking to same location should be a no-op
bris.seek(1);
bris.getInputStream();
verify(bris, times(brisOpens)).openInputStream(Mockito.anyLong());
verify(bris, times(brisCloses)).close();
verify(mockStream.in, times(isCloses)).close();
// close should of course close
bris.close();
verify(bris, times(++brisCloses)).close();
verify(mockStream.in, times(++isCloses)).close();
// it's already closed, underlying stream should not close
bris.close();
verify(bris, times(++brisCloses)).close();
verify(mockStream.in, times(isCloses)).close();
// it's closed, don't reopen it
boolean errored = false;
try {
bris.getInputStream();
} catch (IOException e) {
errored = true;
assertEquals("Stream closed", e.getMessage());
} finally {
assertTrue("Read a closed steam", errored);
}
verify(bris, times(brisOpens)).openInputStream(Mockito.anyLong());
verify(bris, times(brisCloses)).close();
verify(mockStream.in, times(isCloses)).close();
}
}
| 7,444 | 33.78972 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpFSPorts.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.net.URI;
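/** Verify that webhdfs and swebhdfs honor custom default HTTP/HTTPS ports from the configuration. */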
public class TestHttpFSPorts {
private static final Configuration conf = new Configuration();
@Before
public void setupConfig() {
conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
}
@Test
public void testWebHdfsCustomDefaultPorts() throws IOException {
URI uri = URI.create("webhdfs://localhost");
WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(uri, conf);
assertEquals(123, fs.getDefaultPort());
assertEquals(uri, fs.getUri());
assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
}
@Test
public void testWebHdfsCustomUriPortWithCustomDefaultPorts() throws IOException {
URI uri = URI.create("webhdfs://localhost:789");
WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(uri, conf);
assertEquals(123, fs.getDefaultPort());
assertEquals(uri, fs.getUri());
assertEquals("127.0.0.1:789", fs.getCanonicalServiceName());
}
@Test
public void testSWebHdfsCustomDefaultPorts() throws IOException {
URI uri = URI.create("swebhdfs://localhost");
SWebHdfsFileSystem fs = (SWebHdfsFileSystem) FileSystem.get(uri, conf);
assertEquals(456, fs.getDefaultPort());
assertEquals(uri, fs.getUri());
assertEquals("127.0.0.1:456", fs.getCanonicalServiceName());
}
@Test
public void testSwebHdfsCustomUriPortWithCustomDefaultPorts() throws IOException {
URI uri = URI.create("swebhdfs://localhost:789");
SWebHdfsFileSystem fs = (SWebHdfsFileSystem) FileSystem.get(uri, conf);
assertEquals(456, fs.getDefaultPort());
assertEquals(uri, fs.getUri());
assertEquals("127.0.0.1:789", fs.getCanonicalServiceName());
}
}
| 2,866 | 34.395062 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithAuthenticationFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.net.NetUtils;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
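/** Verify that a custom authentication filter configured for WebHDFS can allow or reject requests. */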
public class TestWebHdfsWithAuthenticationFilter {
private static boolean authorized = false;
public static final class CustomizedFilter implements Filter {
@Override
public void init(FilterConfig filterConfig) throws ServletException {
}
@Override
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain chain) throws IOException, ServletException {
if (authorized) {
chain.doFilter(request, response);
} else {
((HttpServletResponse) response)
.sendError(HttpServletResponse.SC_FORBIDDEN);
}
}
@Override
public void destroy() {
}
}
private static Configuration conf;
private static MiniDFSCluster cluster;
private static FileSystem fs;
@BeforeClass
public static void setUp() throws IOException {
conf = new Configuration();
conf.set(DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
CustomizedFilter.class.getName());
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:0");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
InetSocketAddress addr = cluster.getNameNode().getHttpAddress();
fs = FileSystem.get(
URI.create("webhdfs://" + NetUtils.getHostPortString(addr)), conf);
cluster.waitActive();
}
@AfterClass
public static void tearDown() throws IOException {
fs.close();
cluster.shutdown();
}
@Test
public void testWebHdfsAuthFilter() throws IOException {
    // With authorization disabled, the custom filter must reject the request;
    // getFileStatus() would pass through with the default filter.
authorized = false;
try {
fs.getFileStatus(new Path("/"));
Assert.fail("The filter fails to block the request");
} catch (IOException e) {
}
authorized = true;
fs.getFileStatus(new Path("/"));
}
}
| 3,362 | 31.336538 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.fail;
import java.io.EOFException;
import java.io.IOException;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.Random;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestDFSClientRetries;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.web.resources.LengthParam;
import org.apache.hadoop.hdfs.web.resources.OffsetParam;
import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
/** Test WebHDFS */
public class TestWebHDFS {
static final Log LOG = LogFactory.getLog(TestWebHDFS.class);
static final Random RANDOM = new Random();
static final long systemStartTime = System.nanoTime();
/** A timer for measuring performance. */
static class Ticker {
final String name;
final long startTime = System.nanoTime();
private long previousTick = startTime;
Ticker(final String name, String format, Object... args) {
this.name = name;
LOG.info(String.format("\n\n%s START: %s\n",
name, String.format(format, args)));
}
void tick(final long nBytes, String format, Object... args) {
final long now = System.nanoTime();
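      // Log progress at most once every 10 seconds.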
if (now - previousTick > 10000000000L) {
previousTick = now;
        final double minutes = (now - systemStartTime)/60000000000.0;
        LOG.info(String.format("\n\n%s (%.2f min) %s %s\n", name, minutes,
String.format(format, args), toMpsString(nBytes, now)));
}
}
void end(final long nBytes) {
final long now = System.nanoTime();
final double seconds = (now - startTime)/1000000000.0;
LOG.info(String.format("\n\n%s END: duration=%.2fs %s\n",
name, seconds, toMpsString(nBytes, now)));
}
String toMpsString(final long nBytes, final long now) {
final double mb = nBytes/(double)(1<<20);
final double mps = mb*1000000000.0/(now - startTime);
return String.format("[nBytes=%.2fMB, speed=%.2fMB/s]", mb, mps);
}
}
@Test(timeout=300000)
public void testLargeFile() throws Exception {
largeFileTest(200L << 20); //200MB file length
}
/** Test read and write large files. */
static void largeFileTest(final long fileLength) throws Exception {
final Configuration conf = WebHdfsTestUtil.createConf();
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3)
.build();
try {
cluster.waitActive();
final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
final Path dir = new Path("/test/largeFile");
Assert.assertTrue(fs.mkdirs(dir));
final byte[] data = new byte[1 << 20];
RANDOM.nextBytes(data);
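      // The file content is the 1 MB random pattern written repeatedly; keep
      // two copies in 'expected' so checkData() can compare a chunk starting
      // at any offset modulo the pattern length.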
final byte[] expected = new byte[2 * data.length];
System.arraycopy(data, 0, expected, 0, data.length);
System.arraycopy(data, 0, expected, data.length, data.length);
final Path p = new Path(dir, "file");
final Ticker t = new Ticker("WRITE", "fileLength=" + fileLength);
final FSDataOutputStream out = fs.create(p);
try {
long remaining = fileLength;
for(; remaining > 0;) {
t.tick(fileLength - remaining, "remaining=%d", remaining);
final int n = (int)Math.min(remaining, data.length);
out.write(data, 0, n);
remaining -= n;
}
} finally {
out.close();
}
t.end(fileLength);
Assert.assertEquals(fileLength, fs.getFileStatus(p).getLen());
final long smallOffset = RANDOM.nextInt(1 << 20) + (1 << 20);
final long largeOffset = fileLength - smallOffset;
final byte[] buf = new byte[data.length];
verifySeek(fs, p, largeOffset, fileLength, buf, expected);
verifySeek(fs, p, smallOffset, fileLength, buf, expected);
verifyPread(fs, p, largeOffset, fileLength, buf, expected);
} finally {
cluster.shutdown();
}
}
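  // Spot-check roughly 1% of the chunks read back: the file holds the 1 MB
  // pattern repeated, so the expected byte for a given file offset is found
  // at expected[offset % pattern length].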
static void checkData(long offset, long remaining, int n,
byte[] actual, byte[] expected) {
if (RANDOM.nextInt(100) == 0) {
int j = (int)(offset % actual.length);
for(int i = 0; i < n; i++) {
if (expected[j] != actual[i]) {
Assert.fail("expected[" + j + "]=" + expected[j]
+ " != actual[" + i + "]=" + actual[i]
+ ", offset=" + offset + ", remaining=" + remaining + ", n=" + n);
}
j++;
}
}
}
/** test seek */
static void verifySeek(FileSystem fs, Path p, long offset, long length,
byte[] buf, byte[] expected) throws IOException {
long remaining = length - offset;
long checked = 0;
LOG.info("XXX SEEK: offset=" + offset + ", remaining=" + remaining);
final Ticker t = new Ticker("SEEK", "offset=%d, remaining=%d",
offset, remaining);
final FSDataInputStream in = fs.open(p, 64 << 10);
in.seek(offset);
for(; remaining > 0; ) {
t.tick(checked, "offset=%d, remaining=%d", offset, remaining);
final int n = (int)Math.min(remaining, buf.length);
in.readFully(buf, 0, n);
checkData(offset, remaining, n, buf, expected);
offset += n;
remaining -= n;
checked += n;
}
in.close();
t.end(checked);
}
static void verifyPread(FileSystem fs, Path p, long offset, long length,
byte[] buf, byte[] expected) throws IOException {
long remaining = length - offset;
long checked = 0;
LOG.info("XXX PREAD: offset=" + offset + ", remaining=" + remaining);
final Ticker t = new Ticker("PREAD", "offset=%d, remaining=%d",
offset, remaining);
final FSDataInputStream in = fs.open(p, 64 << 10);
for(; remaining > 0; ) {
t.tick(checked, "offset=%d, remaining=%d", offset, remaining);
final int n = (int)Math.min(remaining, buf.length);
in.readFully(offset, buf, 0, n);
checkData(offset, remaining, n, buf, expected);
offset += n;
remaining -= n;
checked += n;
}
in.close();
t.end(checked);
}
/** Test client retry with namenode restarting. */
@Test(timeout=300000)
public void testNamenodeRestart() throws Exception {
((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
final Configuration conf = WebHdfsTestUtil.createConf();
TestDFSClientRetries.namenodeRestartTest(conf, true);
}
@Test(timeout=300000)
public void testLargeDirectory() throws Exception {
final Configuration conf = WebHdfsTestUtil.createConf();
final int listLimit = 2;
// force small chunking of directory listing
conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, listLimit);
// force paths to be only owner-accessible to ensure ugi isn't changing
// during listStatus
FsPermission.setUMask(conf, new FsPermission((short)0077));
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
cluster.waitActive();
WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME)
.setPermission(new Path("/"),
new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
      // trick the NN into believing the login user is not the superuser so we
      // can tell if the correct user is used by listStatus
UserGroupInformation.setLoginUser(
UserGroupInformation.createUserForTesting(
"not-superuser", new String[]{"not-supergroup"}));
UserGroupInformation.createUserForTesting("me", new String[]{"my-group"})
.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws IOException, URISyntaxException {
FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsConstants.WEBHDFS_SCHEME);
Path d = new Path("/my-dir");
Assert.assertTrue(fs.mkdirs(d));
for (int i=0; i < listLimit*3; i++) {
Path p = new Path(d, "file-"+i);
Assert.assertTrue(fs.createNewFile(p));
}
Assert.assertEquals(listLimit*3, fs.listStatus(d).length);
return null;
}
});
} finally {
cluster.shutdown();
}
}
@Test(timeout=300000)
public void testNumericalUserName() throws Exception {
final Configuration conf = WebHdfsTestUtil.createConf();
conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, "^[A-Za-z0-9_][A-Za-z0-9" +
"._-]*[$]?$");
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
cluster.waitActive();
WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME)
.setPermission(new Path("/"),
new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
UserGroupInformation.createUserForTesting("123", new String[]{"my-group"})
.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws IOException, URISyntaxException {
FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsConstants.WEBHDFS_SCHEME);
Path d = new Path("/my-dir");
Assert.assertTrue(fs.mkdirs(d));
return null;
}
});
} finally {
cluster.shutdown();
}
}
/**
   * Test for catching the "no datanode" IOException when creating a file
   * while no datanode is running.
*/
@Test(timeout=300000)
public void testCreateWithNoDN() throws Exception {
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
cluster.waitActive();
FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsConstants.WEBHDFS_SCHEME);
fs.create(new Path("/testnodatanode"));
Assert.fail("No exception was thrown");
} catch (IOException ex) {
GenericTestUtils.assertExceptionContains("Failed to find datanode", ex);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* WebHdfs should be enabled by default after HDFS-5532
*
* @throws Exception
*/
@Test
public void testWebHdfsEnabledByDefault() throws Exception {
Configuration conf = new HdfsConfiguration();
Assert.assertTrue(conf.getBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
false));
}
/**
* Test snapshot creation through WebHdfs
*/
@Test
public void testWebHdfsCreateSnapshot() throws Exception {
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsConstants.WEBHDFS_SCHEME);
final Path foo = new Path("/foo");
dfs.mkdirs(foo);
try {
webHdfs.createSnapshot(foo);
fail("Cannot create snapshot on a non-snapshottable directory");
} catch (Exception e) {
GenericTestUtils.assertExceptionContains(
"Directory is not a snapshottable directory", e);
}
// allow snapshots on /foo
dfs.allowSnapshot(foo);
// create snapshots on foo using WebHdfs
webHdfs.createSnapshot(foo, "s1");
// create snapshot without specifying name
final Path spath = webHdfs.createSnapshot(foo, null);
Assert.assertTrue(webHdfs.exists(spath));
final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
Assert.assertTrue(webHdfs.exists(s1path));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test snapshot deletion through WebHdfs
*/
@Test
public void testWebHdfsDeleteSnapshot() throws Exception {
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsConstants.WEBHDFS_SCHEME);
final Path foo = new Path("/foo");
dfs.mkdirs(foo);
dfs.allowSnapshot(foo);
webHdfs.createSnapshot(foo, "s1");
final Path spath = webHdfs.createSnapshot(foo, null);
Assert.assertTrue(webHdfs.exists(spath));
final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
Assert.assertTrue(webHdfs.exists(s1path));
// delete the two snapshots
webHdfs.deleteSnapshot(foo, "s1");
Assert.assertFalse(webHdfs.exists(s1path));
webHdfs.deleteSnapshot(foo, spath.getName());
Assert.assertFalse(webHdfs.exists(spath));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test snapshot rename through WebHdfs
*/
@Test
public void testWebHdfsRenameSnapshot() throws Exception {
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsConstants.WEBHDFS_SCHEME);
final Path foo = new Path("/foo");
dfs.mkdirs(foo);
dfs.allowSnapshot(foo);
webHdfs.createSnapshot(foo, "s1");
final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
Assert.assertTrue(webHdfs.exists(s1path));
// rename s1 to s2
webHdfs.renameSnapshot(foo, "s1", "s2");
Assert.assertFalse(webHdfs.exists(s1path));
final Path s2path = SnapshotTestHelper.getSnapshotRoot(foo, "s2");
Assert.assertTrue(webHdfs.exists(s2path));
webHdfs.deleteSnapshot(foo, "s2");
Assert.assertFalse(webHdfs.exists(s2path));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Make sure a RetriableException is thrown when rpcServer is null in
* NamenodeWebHdfsMethods.
*/
@Test
public void testRaceWhileNNStartup() throws Exception {
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final NameNode namenode = cluster.getNameNode();
final NamenodeProtocols rpcServer = namenode.getRpcServer();
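      // Null out the NameNode's rpcServer field to simulate the startup
      // window in which the RPC server is not yet available.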
Whitebox.setInternalState(namenode, "rpcServer", null);
final Path foo = new Path("/foo");
final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsConstants.WEBHDFS_SCHEME);
try {
webHdfs.mkdirs(foo);
fail("Expected RetriableException");
} catch (RetriableException e) {
GenericTestUtils.assertExceptionContains("Namenode is in startup mode",
e);
}
Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testDTInInsecureClusterWithFallback()
throws IOException, URISyntaxException {
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
conf.setBoolean(CommonConfigurationKeys
.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, true);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsConstants.WEBHDFS_SCHEME);
Assert.assertNull(webHdfs.getDelegationToken(null));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testDTInInsecureCluster() throws Exception {
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsConstants.WEBHDFS_SCHEME);
webHdfs.getDelegationToken(null);
fail("No exception is thrown.");
} catch (AccessControlException ace) {
Assert.assertTrue(ace.getMessage().startsWith(
WebHdfsFileSystem.CANT_FALLBACK_TO_INSECURE_MSG));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testWebHdfsOffsetAndLength() throws Exception{
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
final int OFFSET = 42;
final int LENGTH = 512;
final String PATH = "/foo";
byte[] CONTENTS = new byte[1024];
RANDOM.nextBytes(CONTENTS);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
final WebHdfsFileSystem fs =
WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
try (OutputStream os = fs.create(new Path(PATH))) {
os.write(CONTENTS);
}
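      // Issue a raw OPEN request with offset and length parameters against
      // the NameNode HTTP port and verify that exactly LENGTH bytes starting
      // at OFFSET are returned.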
InetSocketAddress addr = cluster.getNameNode().getHttpAddress();
URL url = new URL("http", addr.getHostString(), addr
.getPort(), WebHdfsFileSystem.PATH_PREFIX + PATH + "?op=OPEN" +
Param.toSortedString("&", new OffsetParam((long) OFFSET),
new LengthParam((long) LENGTH))
);
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setInstanceFollowRedirects(true);
Assert.assertEquals(LENGTH, conn.getContentLength());
byte[] subContents = new byte[LENGTH];
byte[] realContents = new byte[LENGTH];
System.arraycopy(CONTENTS, OFFSET, subContents, 0, LENGTH);
IOUtils.readFully(conn.getInputStream(), realContents);
Assert.assertArrayEquals(subContents, realContents);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testWebHdfsPread() throws Exception {
final Configuration conf = WebHdfsTestUtil.createConf();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.build();
byte[] content = new byte[1024];
RANDOM.nextBytes(content);
final Path foo = new Path("/foo");
FSDataInputStream in = null;
try {
final WebHdfsFileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsConstants.WEBHDFS_SCHEME);
try (OutputStream os = fs.create(foo)) {
os.write(content);
}
// pread
in = fs.open(foo, 1024);
byte[] buf = new byte[1024];
try {
in.readFully(1020, buf, 0, 5);
Assert.fail("EOF expected");
} catch (EOFException ignored) {}
// mix pread with stateful read
int length = in.read(buf, 0, 512);
in.readFully(100, new byte[1024], 0, 100);
int preadLen = in.read(200, new byte[1024], 0, 200);
Assert.assertTrue(preadLen > 0);
IOUtils.readFully(in, buf, length, 1024 - length);
Assert.assertArrayEquals(content, buf);
} finally {
if (in != null) {
in.close();
}
cluster.shutdown();
}
}
@Test(timeout = 30000)
public void testGetHomeDirectory() throws Exception {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
DistributedFileSystem hdfs = cluster.getFileSystem();
final URI uri = new URI(WebHdfsConstants.WEBHDFS_SCHEME + "://"
+ cluster.getHttpUri(0).replace("http://", ""));
final Configuration confTemp = new Configuration();
{
WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) FileSystem.get(uri,
confTemp);
assertEquals(hdfs.getHomeDirectory().toUri().getPath(), webhdfs
.getHomeDirectory().toUri().getPath());
webhdfs.close();
}
{
WebHdfsFileSystem webhdfs = createWebHDFSAsTestUser(confTemp, uri,
"XXX");
assertNotEquals(hdfs.getHomeDirectory().toUri().getPath(), webhdfs
.getHomeDirectory().toUri().getPath());
webhdfs.close();
}
} finally {
if (cluster != null)
cluster.shutdown();
}
}
private WebHdfsFileSystem createWebHDFSAsTestUser(final Configuration conf,
final URI uri, final String userName) throws Exception {
final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
userName, new String[] { "supergroup" });
return ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
@Override
public WebHdfsFileSystem run() throws IOException {
WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) FileSystem.get(uri,
conf);
return webhdfs;
}
});
}
}
| 23,710 | 34.179525 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import java.io.File;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestHttpsFileSystem {
private static final String BASEDIR = System.getProperty("test.build.dir",
"target/test-dir") + "/" + TestHttpsFileSystem.class.getSimpleName();
private static MiniDFSCluster cluster;
private static Configuration conf;
private static String keystoresDir;
private static String sslConfDir;
private static String nnAddr;
@BeforeClass
public static void setUp() throws Exception {
conf = new Configuration();
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
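    // Generate test keystores and ssl-client/ssl-server configurations for
    // the HTTPS-only cluster.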
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestHttpsFileSystem.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
OutputStream os = cluster.getFileSystem().create(new Path("/test"));
os.write(23);
os.close();
InetSocketAddress addr = cluster.getNameNode().getHttpsAddress();
nnAddr = NetUtils.getHostPortString(addr);
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr);
}
@AfterClass
public static void tearDown() throws Exception {
cluster.shutdown();
FileUtil.fullyDelete(new File(BASEDIR));
KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
}
@Test
public void testHsftpFileSystem() throws Exception {
FileSystem fs = FileSystem.get(new URI("hsftp://" + nnAddr), conf);
Assert.assertTrue(fs.exists(new Path("/test")));
InputStream is = fs.open(new Path("/test"));
Assert.assertEquals(23, is.read());
is.close();
fs.close();
}
@Test
public void testSWebHdfsFileSystem() throws Exception {
FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, "swebhdfs");
final Path f = new Path("/testswebhdfs");
FSDataOutputStream os = fs.create(f);
os.write(23);
os.close();
Assert.assertTrue(fs.exists(f));
InputStream is = fs.open(f);
Assert.assertEquals(23, is.read());
is.close();
fs.close();
}
}
| 3,982 | 35.209091 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.doReturn;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSMainOperationsBaseTest;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestFSMainOperationsWebHdfs extends FSMainOperationsBaseTest {
{
((Log4JLogger)ExceptionHandler.LOG).getLogger().setLevel(Level.ALL);
}
private static MiniDFSCluster cluster = null;
private static Path defaultWorkingDirectory;
private static FileSystem fileSystem;
public TestFSMainOperationsWebHdfs() {
super("/tmp/TestFSMainOperationsWebHdfs");
}
@Override
protected FileSystem createFileSystem() throws Exception {
return fileSystem;
}
@BeforeClass
public static void setupCluster() {
final Configuration conf = new Configuration();
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.waitActive();
//change root permission to 777
cluster.getFileSystem().setPermission(
new Path("/"), new FsPermission((short)0777));
final String uri = WebHdfsConstants.WEBHDFS_SCHEME + "://"
+ conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
//get file system as a non-superuser
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
current.getShortUserName() + "x", new String[]{"user"});
fileSystem = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(new URI(uri), conf);
}
});
defaultWorkingDirectory = fileSystem.getWorkingDirectory();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@AfterClass
public static void shutdownCluster() {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
@Override
protected Path getDefaultWorkingDirectory() {
return defaultWorkingDirectory;
}
@Test
public void testConcat() throws Exception {
Path[] paths = {new Path("/test/hadoop/file1"),
new Path("/test/hadoop/file2"),
new Path("/test/hadoop/file3")};
DFSTestUtil.createFile(fSys, paths[0], 1024, (short) 3, 0);
DFSTestUtil.createFile(fSys, paths[1], 1024, (short) 3, 0);
DFSTestUtil.createFile(fSys, paths[2], 1024, (short) 3, 0);
Path catPath = new Path("/test/hadoop/catFile");
DFSTestUtil.createFile(fSys, catPath, 1024, (short) 3, 0);
Assert.assertTrue(exists(fSys, catPath));
fSys.concat(catPath, paths);
Assert.assertFalse(exists(fSys, paths[0]));
Assert.assertFalse(exists(fSys, paths[1]));
Assert.assertFalse(exists(fSys, paths[2]));
FileStatus fileStatus = fSys.getFileStatus(catPath);
Assert.assertEquals(1024*4, fileStatus.getLen());
}
@Test
public void testTruncate() throws Exception {
final short repl = 3;
final int blockSize = 1024;
final int numOfBlocks = 2;
Path dir = getTestRootPath(fSys, "test/hadoop");
Path file = getTestRootPath(fSys, "test/hadoop/file");
final byte[] data = getFileData(numOfBlocks, blockSize);
createFile(fSys, file, data, blockSize, repl);
final int newLength = blockSize;
boolean isReady = fSys.truncate(file, newLength);
Assert.assertTrue("Recovery is not expected.", isReady);
FileStatus fileStatus = fSys.getFileStatus(file);
Assert.assertEquals(fileStatus.getLen(), newLength);
AppendTestUtil.checkFullFile(fSys, file, newLength, data, file.toString());
ContentSummary cs = fSys.getContentSummary(dir);
Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
newLength * repl);
Assert.assertTrue("Deleted", fSys.delete(dir, true));
}
// Test that WebHdfsFileSystem.jsonParse() closes the connection's input
// stream.
// Closing the inputstream in jsonParse will allow WebHDFS to reuse
// connections to the namenode rather than needing to always open new ones.
boolean closedInputStream = false;
@Test
public void testJsonParseClosesInputStream() throws Exception {
final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fileSystem;
Path file = getTestRootPath(fSys, "test/hadoop/file");
createFile(file);
final HttpOpParam.Op op = GetOpParam.Op.GETHOMEDIRECTORY;
final URL url = webhdfs.toUrl(op, file);
final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod(op.getType().toString());
conn.connect();
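    // Wrap the connection's input stream so the test can observe whether
    // jsonParse() closes it.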
InputStream myIn = new InputStream(){
private HttpURLConnection localConn = conn;
@Override
public void close() throws IOException {
closedInputStream = true;
localConn.getInputStream().close();
}
@Override
public int read() throws IOException {
return localConn.getInputStream().read();
}
};
final HttpURLConnection spyConn = spy(conn);
doReturn(myIn).when(spyConn).getInputStream();
try {
Assert.assertFalse(closedInputStream);
WebHdfsFileSystem.jsonParse(spyConn, false);
Assert.assertTrue(closedInputStream);
} catch(IOException ioe) {
junit.framework.TestCase.fail();
}
conn.disconnect();
}
@Override
@Test
public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
Path testDir = getTestRootPath(fSys, "test/hadoop");
Assert.assertFalse(exists(fSys, testDir));
fSys.mkdirs(testDir);
Assert.assertTrue(exists(fSys, testDir));
createFile(getTestRootPath(fSys, "test/hadoop/file"));
Path testSubDir = getTestRootPath(fSys, "test/hadoop/file/subdir");
try {
fSys.mkdirs(testSubDir);
Assert.fail("Should throw IOException.");
} catch (IOException e) {
// expected
}
try {
Assert.assertFalse(exists(fSys, testSubDir));
} catch(AccessControlException e) {
// also okay for HDFS.
}
Path testDeepSubDir = getTestRootPath(fSys, "test/hadoop/file/deep/sub/dir");
try {
fSys.mkdirs(testDeepSubDir);
Assert.fail("Should throw IOException.");
} catch (IOException e) {
// expected
}
try {
Assert.assertFalse(exists(fSys, testDeepSubDir));
} catch(AccessControlException e) {
// also okay for HDFS.
}
}
}
| 8,480 | 33.336032 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import java.net.InetSocketAddress;
import java.net.URI;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Test WebHDFS with multiple NameNodes
*/
public class TestWebHdfsWithMultipleNameNodes {
static final Log LOG = WebHdfsTestUtil.LOG;
static private void setLogLevel() {
((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}
private static final Configuration conf = new HdfsConfiguration();
private static MiniDFSCluster cluster;
private static WebHdfsFileSystem[] webhdfs;
@BeforeClass
public static void setupTest() {
setLogLevel();
try {
setupCluster(4, 3);
} catch(Exception e) {
throw new RuntimeException(e);
}
}
private static void setupCluster(final int nNameNodes, final int nDataNodes)
throws Exception {
LOG.info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
.numDataNodes(nDataNodes)
.build();
cluster.waitActive();
webhdfs = new WebHdfsFileSystem[nNameNodes];
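    // Create one WebHDFS client per federated NameNode, each bound to that
    // NameNode's HTTP address.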
for(int i = 0; i < webhdfs.length; i++) {
final InetSocketAddress addr = cluster.getNameNode(i).getHttpAddress();
final String uri = WebHdfsConstants.WEBHDFS_SCHEME + "://"
+ addr.getHostName() + ":" + addr.getPort() + "/";
webhdfs[i] = (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
}
}
@AfterClass
public static void shutdownCluster() {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
private static String createString(String prefix, int i) {
//The suffix is to make sure the strings have different lengths.
final String suffix = "*********************".substring(0, i+1);
return prefix + i + suffix + "\n";
}
private static String[] createStrings(String prefix, String name) {
final String[] strings = new String[webhdfs.length];
for(int i = 0; i < webhdfs.length; i++) {
strings[i] = createString(prefix, i);
LOG.info(name + "[" + i + "] = " + strings[i]);
}
return strings;
}
@Test
public void testRedirect() throws Exception {
final String dir = "/testRedirect/";
final String filename = "file";
final Path p = new Path(dir, filename);
final String[] writeStrings = createStrings("write to webhdfs ", "write");
final String[] appendStrings = createStrings("append to webhdfs ", "append");
//test create: create a file for each namenode
for(int i = 0; i < webhdfs.length; i++) {
final FSDataOutputStream out = webhdfs[i].create(p);
out.write(writeStrings[i].getBytes());
out.close();
}
for(int i = 0; i < webhdfs.length; i++) {
//check file length
final long expected = writeStrings[i].length();
Assert.assertEquals(expected, webhdfs[i].getFileStatus(p).getLen());
}
//test read: check file content for each namenode
for(int i = 0; i < webhdfs.length; i++) {
final FSDataInputStream in = webhdfs[i].open(p);
for(int c, j = 0; (c = in.read()) != -1; j++) {
Assert.assertEquals(writeStrings[i].charAt(j), c);
}
in.close();
}
//test append: append to the file for each namenode
for(int i = 0; i < webhdfs.length; i++) {
final FSDataOutputStream out = webhdfs[i].append(p);
out.write(appendStrings[i].getBytes());
out.close();
}
for(int i = 0; i < webhdfs.length; i++) {
//check file length
final long expected = writeStrings[i].length() + appendStrings[i].length();
Assert.assertEquals(expected, webhdfs[i].getFileStatus(p).getLen());
}
//test read: check file content for each namenode
for(int i = 0; i < webhdfs.length; i++) {
final StringBuilder b = new StringBuilder();
final FSDataInputStream in = webhdfs[i].open(p);
for(int c; (c = in.read()) != -1; ) {
b.append((char)c);
}
final int wlen = writeStrings[i].length();
Assert.assertEquals(writeStrings[i], b.substring(0, wlen));
Assert.assertEquals(appendStrings[i], b.substring(wlen));
in.close();
}
}
}
| 6,002 | 33.901163 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketTimeoutException;
import java.nio.channels.SocketChannel;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* This test suite checks that WebHdfsFileSystem sets connection timeouts and
* read timeouts on its sockets, thus preventing threads from hanging
* indefinitely on an undefined/infinite timeout. The tests work by starting a
* bogus server on the namenode HTTP port, which is rigged to not accept new
* connections or to accept connections but not send responses.
*/
public class TestWebHdfsTimeouts {
private static final Log LOG = LogFactory.getLog(TestWebHdfsTimeouts.class);
private static final int CLIENTS_TO_CONSUME_BACKLOG = 100;
private static final int CONNECTION_BACKLOG = 1;
private static final int SHORT_SOCKET_TIMEOUT = 5;
private static final int TEST_TIMEOUT = 10000;
private List<SocketChannel> clients;
private WebHdfsFileSystem fs;
private InetSocketAddress nnHttpAddress;
private ServerSocket serverSocket;
private Thread serverThread;
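  // A connection factory that applies a very short (5 ms) connect and read
  // timeout so the tests exercise the timeout paths quickly.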
private final URLConnectionFactory connectionFactory = new URLConnectionFactory(new ConnectionConfigurator() {
@Override
public HttpURLConnection configure(HttpURLConnection conn) throws IOException {
conn.setReadTimeout(SHORT_SOCKET_TIMEOUT);
conn.setConnectTimeout(SHORT_SOCKET_TIMEOUT);
return conn;
}
});
@Before
public void setUp() throws Exception {
Configuration conf = WebHdfsTestUtil.createConf();
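    // Stand in for the NameNode's HTTP server with a plain ServerSocket that
    // never sends a response, and point the WebHDFS client at it.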
serverSocket = new ServerSocket(0, CONNECTION_BACKLOG);
nnHttpAddress = new InetSocketAddress("localhost", serverSocket.getLocalPort());
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:" + serverSocket.getLocalPort());
fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
fs.connectionFactory = connectionFactory;
clients = new ArrayList<SocketChannel>();
serverThread = null;
}
@After
public void tearDown() throws Exception {
IOUtils.cleanup(LOG, clients.toArray(new SocketChannel[clients.size()]));
IOUtils.cleanup(LOG, fs);
if (serverSocket != null) {
try {
serverSocket.close();
} catch (IOException e) {
LOG.debug("Exception in closing " + serverSocket, e);
}
}
if (serverThread != null) {
serverThread.join();
}
}
/**
* Expect connect timeout, because the connection backlog is consumed.
*/
@Test(timeout=TEST_TIMEOUT)
public void testConnectTimeout() throws Exception {
consumeConnectionBacklog();
try {
fs.listFiles(new Path("/"), false);
fail("expected timeout");
} catch (SocketTimeoutException e) {
assertEquals("connect timed out", e.getMessage());
}
}
/**
* Expect read timeout, because the bogus server never sends a reply.
*/
@Test(timeout=TEST_TIMEOUT)
public void testReadTimeout() throws Exception {
try {
fs.listFiles(new Path("/"), false);
fail("expected timeout");
} catch (SocketTimeoutException e) {
assertEquals("Read timed out", e.getMessage());
}
}
/**
* Expect connect timeout on a URL that requires auth, because the connection
* backlog is consumed.
*/
@Test(timeout=TEST_TIMEOUT)
public void testAuthUrlConnectTimeout() throws Exception {
consumeConnectionBacklog();
try {
fs.getDelegationToken("renewer");
fail("expected timeout");
} catch (SocketTimeoutException e) {
assertEquals("connect timed out", e.getMessage());
}
}
/**
* Expect read timeout on a URL that requires auth, because the bogus server
* never sends a reply.
*/
@Test(timeout=TEST_TIMEOUT)
public void testAuthUrlReadTimeout() throws Exception {
try {
fs.getDelegationToken("renewer");
fail("expected timeout");
} catch (SocketTimeoutException e) {
assertEquals("Read timed out", e.getMessage());
}
}
/**
* After a redirect, expect connect timeout accessing the redirect location,
* because the connection backlog is consumed.
*/
@Test(timeout=TEST_TIMEOUT)
public void testRedirectConnectTimeout() throws Exception {
startSingleTemporaryRedirectResponseThread(true);
try {
fs.getFileChecksum(new Path("/file"));
fail("expected timeout");
} catch (SocketTimeoutException e) {
assertEquals("connect timed out", e.getMessage());
}
}
/**
* After a redirect, expect read timeout accessing the redirect location,
* because the bogus server never sends a reply.
*/
@Test(timeout=TEST_TIMEOUT)
public void testRedirectReadTimeout() throws Exception {
startSingleTemporaryRedirectResponseThread(false);
try {
fs.getFileChecksum(new Path("/file"));
fail("expected timeout");
} catch (SocketTimeoutException e) {
assertEquals("Read timed out", e.getMessage());
}
}
/**
* On the second step of two-step write, expect connect timeout accessing the
* redirect location, because the connection backlog is consumed.
*/
@Test(timeout=TEST_TIMEOUT)
public void testTwoStepWriteConnectTimeout() throws Exception {
startSingleTemporaryRedirectResponseThread(true);
OutputStream os = null;
try {
os = fs.create(new Path("/file"));
fail("expected timeout");
} catch (SocketTimeoutException e) {
assertEquals("connect timed out", e.getMessage());
} finally {
IOUtils.cleanup(LOG, os);
}
}
/**
* On the second step of two-step write, expect read timeout accessing the
* redirect location, because the bogus server never sends a reply.
*/
@Test(timeout=TEST_TIMEOUT)
public void testTwoStepWriteReadTimeout() throws Exception {
startSingleTemporaryRedirectResponseThread(false);
OutputStream os = null;
try {
os = fs.create(new Path("/file"));
os.close(); // must close stream to force reading the HTTP response
os = null;
fail("expected timeout");
} catch (SocketTimeoutException e) {
assertEquals("Read timed out", e.getMessage());
} finally {
IOUtils.cleanup(LOG, os);
}
}
/**
* Starts a background thread that accepts one and only one client connection
* on the server socket, sends an HTTP 307 Temporary Redirect response, and
* then exits. This is useful for testing timeouts on the second step of
* methods that issue 2 HTTP requests (request 1, redirect, request 2).
*
   * For handling the first request, this method resets the connection factory
   * to the default system timeouts. Afterwards, it guarantees that the
   * second request will use a very short timeout.
*
* Optionally, the thread may consume the connection backlog immediately after
* receiving its one and only client connection. This is useful for forcing a
* connection timeout on the second request.
*
* On tearDown, open client connections are closed, and the thread is joined.
*
* @param consumeConnectionBacklog boolean whether or not to consume connection
* backlog and thus force a connection timeout on the second request
*/
private void startSingleTemporaryRedirectResponseThread(
final boolean consumeConnectionBacklog) {
fs.connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY;
serverThread = new Thread() {
@Override
public void run() {
Socket clientSocket = null;
OutputStream out = null;
InputStream in = null;
InputStreamReader isr = null;
BufferedReader br = null;
try {
// Accept one and only one client connection.
clientSocket = serverSocket.accept();
// Immediately setup conditions for subsequent connections.
fs.connectionFactory = connectionFactory;
if (consumeConnectionBacklog) {
consumeConnectionBacklog();
}
// Consume client's HTTP request by reading until EOF or empty line.
in = clientSocket.getInputStream();
isr = new InputStreamReader(in);
br = new BufferedReader(isr);
for (;;) {
String line = br.readLine();
if (line == null || line.isEmpty()) {
break;
}
}
// Write response.
out = clientSocket.getOutputStream();
out.write(temporaryRedirect().getBytes("UTF-8"));
} catch (IOException e) {
// Fail the test on any I/O error in the server thread.
LOG.error("unexpected IOException in server thread", e);
fail("unexpected IOException in server thread: " + e);
} finally {
// Clean it all up.
IOUtils.cleanup(LOG, br, isr, in, out);
IOUtils.closeSocket(clientSocket);
}
}
};
serverThread.start();
}
/**
* Consumes the test server's connection backlog by spamming non-blocking
* SocketChannel client connections. We never do anything with these sockets
   * beyond just initiating the connections. The method saves a reference to each
* new SocketChannel so that it can be closed during tearDown. We define a
* very small connection backlog, but the OS may silently enforce a larger
* minimum backlog than requested. To work around this, we create far more
* client connections than our defined backlog.
*
* @throws IOException thrown for any I/O error
*/
private void consumeConnectionBacklog() throws IOException {
for (int i = 0; i < CLIENTS_TO_CONSUME_BACKLOG; ++i) {
SocketChannel client = SocketChannel.open();
client.configureBlocking(false);
client.connect(nnHttpAddress);
clients.add(client);
}
}
/**
* Creates an HTTP 307 response with the redirect location set back to the
* test server's address. HTTP is supposed to terminate newlines with CRLF, so
* we hard-code that instead of using the line separator property.
*
* @return String HTTP 307 response
*/
private String temporaryRedirect() {
return "HTTP/1.1 307 Temporary Redirect\r\n" +
"Location: http://" + NetUtils.getHostPortString(nnHttpAddress) + "\r\n" +
"\r\n";
}
}
| 11,813 | 34.477477 | 112 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
/**
* Tests ACL APIs via WebHDFS.
*/
public class TestWebHDFSAcl extends FSAclBaseTest {
@BeforeClass
public static void init() throws Exception {
conf = WebHdfsTestUtil.createConf();
startCluster();
}
/**
* We need to skip this test on WebHDFS, because WebHDFS currently cannot
* resolve symlinks.
*/
@Override
@Test
@Ignore
public void testDefaultAclNewSymlinkIntermediate() {
}
/**
* Overridden to provide a WebHdfsFileSystem wrapper for the super-user.
*
* @return WebHdfsFileSystem for super-user
* @throws Exception if creation fails
*/
@Override
protected WebHdfsFileSystem createFileSystem() throws Exception {
return WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
}
/**
* Overridden to provide a WebHdfsFileSystem wrapper for a specific user.
*
* @param user UserGroupInformation specific user
* @return WebHdfsFileSystem for specific user
* @throws Exception if creation fails
*/
@Override
protected WebHdfsFileSystem createFileSystem(UserGroupInformation user)
throws Exception {
return WebHdfsTestUtil.getWebHdfsFileSystemAs(user, conf,
WebHdfsConstants.WEBHDFS_SCHEME);
}
}
| 2,267 | 30.5 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLConnection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.util.ServletUtil;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestHftpFileSystem {
private static final String BASEDIR = System.getProperty("test.build.dir",
"target/test-dir") + "/" + TestHftpFileSystem.class.getSimpleName();
private static String keystoresDir;
private static String sslConfDir;
private static Configuration config = null;
private static MiniDFSCluster cluster = null;
private static String blockPoolId = null;
private static String hftpUri = null;
private FileSystem hdfs = null;
private HftpFileSystem hftpFs = null;
private static final Path[] TEST_PATHS = new Path[] {
// URI does not encode, Request#getPathInfo returns /foo
new Path("/foo;bar"),
// URI does not encode, Request#getPathInfo returns verbatim
new Path("/foo+"), new Path("/foo+bar/foo+bar"),
new Path("/foo=bar/foo=bar"), new Path("/foo,bar/foo,bar"),
new Path("/foo@bar/foo@bar"), new Path("/foo&bar/foo&bar"),
new Path("/foo$bar/foo$bar"), new Path("/foo_bar/foo_bar"),
new Path("/foo~bar/foo~bar"), new Path("/foo.bar/foo.bar"),
new Path("/foo../bar/foo../bar"), new Path("/foo.../bar/foo.../bar"),
new Path("/foo'bar/foo'bar"),
new Path("/foo#bar/foo#bar"),
new Path("/foo!bar/foo!bar"),
// HDFS file names may not contain ":"
// URI percent encodes, Request#getPathInfo decodes
new Path("/foo bar/foo bar"), new Path("/foo?bar/foo?bar"),
new Path("/foo\">bar/foo\">bar"), };
@BeforeClass
public static void setUp() throws Exception {
config = new Configuration();
cluster = new MiniDFSCluster.Builder(config).numDataNodes(2).build();
blockPoolId = cluster.getNamesystem().getBlockPoolId();
hftpUri = "hftp://"
+ config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestHftpFileSystem.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, config, false);
}
@AfterClass
public static void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
FileUtil.fullyDelete(new File(BASEDIR));
KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
}
@Before
public void initFileSystems() throws IOException {
hdfs = cluster.getFileSystem();
hftpFs = (HftpFileSystem) new Path(hftpUri).getFileSystem(config);
// clear out the namespace
for (FileStatus stat : hdfs.listStatus(new Path("/"))) {
hdfs.delete(stat.getPath(), true);
}
}
@After
public void resetFileSystems() throws IOException {
FileSystem.closeAll();
}
/**
* Test file creation and access with file names that need encoding.
*/
@Test
public void testFileNameEncoding() throws IOException, URISyntaxException {
for (Path p : TEST_PATHS) {
// Create and access the path (data and streamFile servlets)
FSDataOutputStream out = hdfs.create(p, true);
out.writeBytes("0123456789");
out.close();
FSDataInputStream in = hftpFs.open(p);
assertEquals('0', in.read());
in.close();
// Check the file status matches the path. Hftp returns a FileStatus
// with the entire URI, extract the path part.
assertEquals(p, new Path(hftpFs.getFileStatus(p).getPath().toUri()
.getPath()));
// Test list status (listPath servlet)
assertEquals(1, hftpFs.listStatus(p).length);
// Test content summary (contentSummary servlet)
assertNotNull("No content summary", hftpFs.getContentSummary(p));
// Test checksums (fileChecksum and getFileChecksum servlets)
assertNotNull("No file checksum", hftpFs.getFileChecksum(p));
}
}
private void testDataNodeRedirect(Path path) throws IOException {
// Create the file
if (hdfs.exists(path)) {
hdfs.delete(path, true);
}
FSDataOutputStream out = hdfs.create(path, (short) 1);
out.writeBytes("0123456789");
out.close();
// Get the path's block location so we can determine
// if we were redirected to the right DN.
BlockLocation[] locations = hdfs.getFileBlockLocations(path, 0, 10);
String xferAddr = locations[0].getNames()[0];
// Connect to the NN to get redirected
URL u = hftpFs.getNamenodeURL(
"/data" + ServletUtil.encodePath(path.toUri().getPath()),
"ugi=userx,groupy");
HttpURLConnection conn = (HttpURLConnection) u.openConnection();
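    // HttpURLConnection.setFollowRedirects is a static, JVM-wide switch;
    // following redirects lets the request land on the info port of the
    // datanode that actually serves the block.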
HttpURLConnection.setFollowRedirects(true);
conn.connect();
conn.getInputStream();
boolean checked = false;
// Find the datanode that has the block according to locations
// and check that the URL was redirected to this DN's info port
for (DataNode node : cluster.getDataNodes()) {
DatanodeRegistration dnR = DataNodeTestUtils.getDNRegistrationForBP(node,
blockPoolId);
if (dnR.getXferAddr().equals(xferAddr)) {
checked = true;
assertEquals(dnR.getInfoPort(), conn.getURL().getPort());
}
}
assertTrue("The test never checked that location of "
+ "the block and hftp desitnation are the same", checked);
}
/**
* Test that clients are redirected to the appropriate DN.
*/
@Test
public void testDataNodeRedirect() throws IOException {
for (Path p : TEST_PATHS) {
testDataNodeRedirect(p);
}
}
/**
* Tests getPos() functionality.
*/
@Test
public void testGetPos() throws IOException {
final Path testFile = new Path("/testfile+1");
// Write a test file.
FSDataOutputStream out = hdfs.create(testFile, true);
out.writeBytes("0123456789");
out.close();
FSDataInputStream in = hftpFs.open(testFile);
// Test read().
for (int i = 0; i < 5; ++i) {
assertEquals(i, in.getPos());
in.read();
}
// Test read(b, off, len).
assertEquals(5, in.getPos());
byte[] buffer = new byte[10];
assertEquals(2, in.read(buffer, 0, 2));
assertEquals(7, in.getPos());
// Test read(b).
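    // read(b) may legitimately return fewer bytes than the buffer holds, so
    // getPos() is checked against the number of bytes actually read.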
int bytesRead = in.read(buffer);
assertEquals(7 + bytesRead, in.getPos());
// Test EOF.
for (int i = 0; i < 100; ++i) {
in.read();
}
assertEquals(10, in.getPos());
in.close();
}
/**
* Tests seek().
*/
@Test
public void testSeek() throws IOException {
final Path testFile = new Path("/testfile+1");
FSDataOutputStream out = hdfs.create(testFile, true);
out.writeBytes("0123456789");
out.close();
FSDataInputStream in = hftpFs.open(testFile);
in.seek(7);
assertEquals('7', in.read());
in.close();
}
@Test
public void testReadClosedStream() throws IOException {
final Path testFile = new Path("/testfile+2");
FSDataOutputStream os = hdfs.create(testFile, true);
os.writeBytes("0123456789");
os.close();
// ByteRangeInputStream delays opens until reads. Make sure it doesn't
// open a closed stream that has never been opened
FSDataInputStream in = hftpFs.open(testFile);
in.close();
checkClosedStream(in);
checkClosedStream(in.getWrappedStream());
// force the stream to connect and then close it
in = hftpFs.open(testFile);
int ch = in.read();
assertEquals('0', ch);
in.close();
checkClosedStream(in);
checkClosedStream(in.getWrappedStream());
// make sure seeking doesn't automagically reopen the stream
in.seek(4);
checkClosedStream(in);
checkClosedStream(in.getWrappedStream());
}
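  /**
   * Asserts that reading from the given stream fails with the
   * "Stream closed" IOException instead of succeeding or failing in some
   * other way.
   */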
private void checkClosedStream(InputStream is) {
IOException ioe = null;
try {
is.read();
} catch (IOException e) {
ioe = e;
}
assertNotNull("No exception on closed read", ioe);
assertEquals("Stream closed", ioe.getMessage());
}
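  // The port-resolution tests below verify HftpFileSystem/HsftpFileSystem
  // behavior: getDefaultPort() honors the DFS_NAMENODE_HTTP(S)_PORT_KEY
  // setting, falling back to the hard-coded default, while the canonical
  // service name prefers an explicit port in the URI over that default.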
@Test
public void testHftpDefaultPorts() throws IOException {
Configuration conf = new Configuration();
URI uri = URI.create("hftp://localhost");
HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT,
fs.getDefaultPort());
assertEquals(uri, fs.getUri());
// HFTP uses http to get the token so canonical service name should
// return the http port.
assertEquals("127.0.0.1:" + DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT,
fs.getCanonicalServiceName());
}
@Test
public void testHftpCustomDefaultPorts() throws IOException {
Configuration conf = new Configuration();
conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
URI uri = URI.create("hftp://localhost");
HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
assertEquals(123, fs.getDefaultPort());
assertEquals(uri, fs.getUri());
// HFTP uses http to get the token so canonical service name should
// return the http port.
assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
}
@Test
public void testHftpCustomUriPortWithDefaultPorts() throws IOException {
Configuration conf = new Configuration();
URI uri = URI.create("hftp://localhost:123");
HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT,
fs.getDefaultPort());
assertEquals(uri, fs.getUri());
assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
}
@Test
public void testHftpCustomUriPortWithCustomDefaultPorts() throws IOException {
Configuration conf = new Configuration();
conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
URI uri = URI.create("hftp://localhost:789");
HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
assertEquals(123, fs.getDefaultPort());
assertEquals(uri, fs.getUri());
assertEquals("127.0.0.1:789", fs.getCanonicalServiceName());
}
@Test
public void testTimeout() throws IOException {
Configuration conf = new Configuration();
URI uri = URI.create("hftp://localhost");
HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
URLConnection conn = fs.connectionFactory.openConnection(new URL(
"http://localhost"));
assertEquals(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
conn.getConnectTimeout());
assertEquals(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
conn.getReadTimeout());
}
  // HsftpFileSystem (HTTPS) variants of the port-resolution tests.
@Test
public void testHsftpDefaultPorts() throws IOException {
Configuration conf = new Configuration();
URI uri = URI.create("hsftp://localhost");
HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
fs.getDefaultPort());
assertEquals(uri, fs.getUri());
assertEquals("127.0.0.1:" + DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
fs.getCanonicalServiceName());
}
@Test
public void testHsftpCustomDefaultPorts() throws IOException {
Configuration conf = new Configuration();
conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
URI uri = URI.create("hsftp://localhost");
HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
assertEquals(456, fs.getDefaultPort());
assertEquals(uri, fs.getUri());
assertEquals("127.0.0.1:456", fs.getCanonicalServiceName());
}
@Test
public void testHsftpCustomUriPortWithDefaultPorts() throws IOException {
Configuration conf = new Configuration();
URI uri = URI.create("hsftp://localhost:123");
HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
fs.getDefaultPort());
assertEquals(uri, fs.getUri());
assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
}
@Test
public void testHsftpCustomUriPortWithCustomDefaultPorts() throws IOException {
Configuration conf = new Configuration();
conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
conf.setInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
URI uri = URI.create("hsftp://localhost:789");
HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
assertEquals(456, fs.getDefaultPort());
assertEquals(uri, fs.getUri());
assertEquals("127.0.0.1:789", fs.getCanonicalServiceName());
}
}