repo (string, 1-191 chars, nullable) | file (string, 23-351 chars) | code (string, 0-5.32M chars) | file_length (int64, 0-5.32M) | avg_line_length (float64, 0-2.9k) | max_line_length (int64, 0-288k) | extension_type (string, 1 class) |
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.io.IOException;
import java.net.URISyntaxException;
import javax.security.auth.login.LoginException;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
public class TestViewFsHdfs extends ViewFsBaseTest {
private static MiniDFSCluster cluster;
private static final HdfsConfiguration CONF = new HdfsConfiguration();
private static FileContext fc;
@Override
protected FileContextTestHelper createFileContextHelper() {
return new FileContextTestHelper("/tmp/TestViewFsHdfs");
}
@BeforeClass
public static void clusterSetupAtBegining() throws IOException,
LoginException, URISyntaxException {
SupportsBlocks = true;
CONF.setBoolean(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
cluster.waitClusterUp();
fc = FileContext.getFileContext(cluster.getURI(0), CONF);
Path defaultWorkingDirectory = fc.makeQualified( new Path("/user/" +
UserGroupInformation.getCurrentUser().getShortUserName()));
fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
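// DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY makes the NameNode issue
// delegation tokens even on this insecure (non-Kerberos) MiniDFSCluster, which
// is what getExpectedDelegationTokenCount() below relies on; the /user/<name>
// directory created above serves as the default working directory for the tests.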
@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
cluster.shutdown();
}
@Override
@Before
public void setUp() throws Exception {
// point the target at HDFS; the base class creates the test root there
fcTarget = fc;
super.setUp();
}
/**
* This overrides the default implementation since hdfs does have delegation
* tokens.
*/
@Override
int getExpectedDelegationTokenCount() {
return 8;
}
}
| 2,786 | 30.670455 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.io.IOException;
import java.net.URISyntaxException;
import javax.security.auth.login.LoginException;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestHelper;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
/**
* Make sure that ViewFs works when the root of an FS is mounted to a ViewFs
* mount point.
*/
public class TestViewFsAtHdfsRoot extends ViewFsBaseTest {
private static MiniDFSCluster cluster;
private static final HdfsConfiguration CONF = new HdfsConfiguration();
private static FileContext fc;
@Override
protected FileContextTestHelper createFileContextHelper() {
return new FileContextTestHelper("/tmp/TestViewFsAtHdfsRoot");
}
@BeforeClass
public static void clusterSetupAtBegining() throws IOException,
LoginException, URISyntaxException {
SupportsBlocks = true;
CONF.setBoolean(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
cluster.waitClusterUp();
fc = FileContext.getFileContext(cluster.getURI(0), CONF);
}
@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
cluster.shutdown();
}
@Override
@Before
public void setUp() throws Exception {
// point the target at HDFS; the base class creates the test root there
fcTarget = fc;
super.setUp();
}
/**
* Override this so that we don't set the targetTestRoot to any path under the
* root of the FS, and so that we don't try to delete the test dir, but rather
* only its contents.
*/
@Override
void initializeTargetTestRoot() throws IOException {
targetTestRoot = fc.makeQualified(new Path("/"));
RemoteIterator<FileStatus> dirContents = fc.listStatus(targetTestRoot);
while (dirContents.hasNext()) {
fc.delete(dirContents.next().getPath(), true);
}
}
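// With the FS root as the mount target, the base test's links resolve directly
// under the HDFS root, which is why only the root's contents (never the root
// itself) are deleted here. A root-backed link would look roughly like:
//   ConfigUtil.addLink(conf, "/mountPoint", targetTestRoot.toUri());
// (a sketch only; the actual links are set up by ViewFsBaseTest).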
/**
* This overrides the default implementation since hdfs does have delegation
* tokens.
*/
@Override
int getExpectedDelegationTokenCount() {
return 8;
}
}
| 3,213 | 30.821782 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.util.List;
import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
/**
* Verify ACL through ViewFileSystem functionality.
*/
public class TestViewFileSystemWithAcls {
private static MiniDFSCluster cluster;
private static Configuration clusterConf = new Configuration();
private static FileSystem fHdfs;
private static FileSystem fHdfs2;
private FileSystem fsView;
private Configuration fsViewConf;
private FileSystem fsTarget, fsTarget2;
private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2;
private FileSystemTestHelper fileSystemTestHelper =
new FileSystemTestHelper("/tmp/TestViewFileSystemWithAcls");
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(clusterConf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
.numDataNodes(2)
.build();
cluster.waitClusterUp();
fHdfs = cluster.getFileSystem(0);
fHdfs2 = cluster.getFileSystem(1);
}
@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
cluster.shutdown();
}
@Before
public void setUp() throws Exception {
fsTarget = fHdfs;
fsTarget2 = fHdfs2;
targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
targetTestRoot2 = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget2);
fsTarget.delete(targetTestRoot, true);
fsTarget2.delete(targetTestRoot2, true);
fsTarget.mkdirs(targetTestRoot);
fsTarget2.mkdirs(targetTestRoot2);
fsViewConf = ViewFileSystemTestSetup.createConfig();
setupMountPoints();
fsView = FileSystem.get(FsConstants.VIEWFS_URI, fsViewConf);
}
private void setupMountPoints() {
mountOnNn1 = new Path("/mountOnNn1");
mountOnNn2 = new Path("/mountOnNn2");
ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri());
ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
}
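// Each addLink call above becomes a mount-table entry of (roughly) the form
//   fs.viewfs.mounttable.default.link./mountOnNn1 = hdfs://<nn1>/<test root>
//   fs.viewfs.mounttable.default.link./mountOnNn2 = hdfs://<nn2>/<test root>
// so ACL calls on /mountOnNn1 are dispatched to the first NameNode and calls
// on /mountOnNn2 to the second.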
@After
public void tearDown() throws Exception {
fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
fsTarget2.delete(fileSystemTestHelper.getTestRootPath(fsTarget2), true);
}
/**
* Verify a ViewFileSystem wrapped over multiple federated NameNodes will
* dispatch the ACL operations to the correct NameNode.
*/
@Test
public void testAclOnMountEntry() throws Exception {
// Set ACLs on the first namespace and verify they are correct
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE));
fsView.setAcl(mountOnNn1, aclSpec);
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) };
assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn1)));
// Double-check by getting ACL status using FileSystem
// instead of ViewFs
assertArrayEquals(expected, aclEntryArray(fHdfs.getAclStatus(targetTestRoot)));
// Modify the ACL entries on the first namespace
aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ));
fsView.modifyAclEntries(mountOnNn1, aclSpec);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(DEFAULT, USER, READ_WRITE),
aclEntry(DEFAULT, USER, "foo", READ),
aclEntry(DEFAULT, GROUP, READ),
aclEntry(DEFAULT, MASK, READ),
aclEntry(DEFAULT, OTHER, NONE) };
assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn1)));
fsView.removeDefaultAcl(mountOnNn1);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) };
assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn1)));
assertArrayEquals(expected, aclEntryArray(fHdfs.getAclStatus(targetTestRoot)));
// Paranoid check: verify the other namespace does not
// have ACLs set on the same path.
assertEquals(0, fsView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0, fHdfs2.getAclStatus(targetTestRoot2).getEntries().size());
// Remove the ACL entries on the first namespace
fsView.removeAcl(mountOnNn1);
assertEquals(0, fsView.getAclStatus(mountOnNn1).getEntries().size());
assertEquals(0, fHdfs.getAclStatus(targetTestRoot).getEntries().size());
// Now set ACLs on the second namespace
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bar", READ));
fsView.modifyAclEntries(mountOnNn2, aclSpec);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bar", READ),
aclEntry(ACCESS, GROUP, READ_EXECUTE) };
assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn2)));
assertArrayEquals(expected, aclEntryArray(fHdfs2.getAclStatus(targetTestRoot2)));
// Remove the ACL entries on the second namespace
fsView.removeAclEntries(mountOnNn2, Lists.newArrayList(
aclEntry(ACCESS, USER, "bar", READ)
));
expected = new AclEntry[] { aclEntry(ACCESS, GROUP, READ_EXECUTE) };
assertArrayEquals(expected, aclEntryArray(fHdfs2.getAclStatus(targetTestRoot2)));
fsView.removeAcl(mountOnNn2);
assertEquals(0, fsView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0, fHdfs2.getAclStatus(targetTestRoot2).getEntries().size());
}
private AclEntry[] aclEntryArray(AclStatus aclStatus) {
return aclStatus.getEntries().toArray(new AclEntry[0]);
}
}
| 7,572 | 38.649215 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.io.IOException;
import java.net.URISyntaxException;
import javax.security.auth.login.LoginException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
/**
* Make sure that ViewFileSystem works when the root of an FS is mounted to a
* ViewFileSystem mount point.
*/
public class TestViewFileSystemAtHdfsRoot extends ViewFileSystemBaseTest {
private static MiniDFSCluster cluster;
private static final Configuration CONF = new Configuration();
private static FileSystem fHdfs;
@Override
protected FileSystemTestHelper createFileSystemHelper() {
return new FileSystemTestHelper("/tmp/TestViewFileSystemAtHdfsRoot");
}
@BeforeClass
public static void clusterSetupAtBegining() throws IOException,
LoginException, URISyntaxException {
SupportsBlocks = true;
CONF.setBoolean(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
cluster = new MiniDFSCluster.Builder(CONF)
.numDataNodes(2)
.build();
cluster.waitClusterUp();
fHdfs = cluster.getFileSystem();
}
@AfterClass
public static void clusterShutdownAtEnd() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
@Override
@Before
public void setUp() throws Exception {
fsTarget = fHdfs;
super.setUp();
}
/**
* Override this so that we don't set the targetTestRoot to any path under the
* root of the FS, and so that we don't try to delete the test dir, but rather
* only its contents.
*/
@Override
void initializeTargetTestRoot() throws IOException {
targetTestRoot = fHdfs.makeQualified(new Path("/"));
for (FileStatus status : fHdfs.listStatus(targetTestRoot)) {
fHdfs.delete(status.getPath(), true);
}
}
@Override
int getExpectedDelegationTokenCount() {
return 1; // all point to the same fs so 1 unique token
}
@Override
int getExpectedDelegationTokenCountWithCredentials() {
return 1;
}
}
| 3,155 | 29.640777 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.io.IOException;
import java.net.URISyntaxException;
import javax.security.auth.login.LoginException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
private static MiniDFSCluster cluster;
private static Path defaultWorkingDirectory;
private static Path defaultWorkingDirectory2;
private static final Configuration CONF = new Configuration();
private static FileSystem fHdfs;
private static FileSystem fHdfs2;
private FileSystem fsTarget2;
Path targetTestRoot2;
@Override
protected FileSystemTestHelper createFileSystemHelper() {
return new FileSystemTestHelper("/tmp/TestViewFileSystemHdfs");
}
@BeforeClass
public static void clusterSetupAtBegining() throws IOException,
LoginException, URISyntaxException {
SupportsBlocks = true;
CONF.setBoolean(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
cluster =
new MiniDFSCluster.Builder(CONF).nnTopology(
MiniDFSNNTopology.simpleFederatedTopology(2))
.numDataNodes(2)
.build();
cluster.waitClusterUp();
fHdfs = cluster.getFileSystem(0);
fHdfs2 = cluster.getFileSystem(1);
fHdfs.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
FsConstants.VIEWFS_URI.toString());
fHdfs2.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
FsConstants.VIEWFS_URI.toString());
defaultWorkingDirectory = fHdfs.makeQualified( new Path("/user/" +
UserGroupInformation.getCurrentUser().getShortUserName()));
defaultWorkingDirectory2 = fHdfs2.makeQualified( new Path("/user/" +
UserGroupInformation.getCurrentUser().getShortUserName()));
fHdfs.mkdirs(defaultWorkingDirectory);
fHdfs2.mkdirs(defaultWorkingDirectory2);
}
@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
cluster.shutdown();
}
@Override
@Before
public void setUp() throws Exception {
// point the target at HDFS; the base class creates the test root there
fsTarget = fHdfs;
fsTarget2 = fHdfs2;
targetTestRoot2 = new FileSystemTestHelper().getAbsoluteTestRootPath(fsTarget2);
super.setUp();
}
@Override
@After
public void tearDown() throws Exception {
super.tearDown();
}
@Override
void setupMountPoints() {
super.setupMountPoints();
ConfigUtil.addLink(conf, "/mountOnNn2", new Path(targetTestRoot2,
"mountOnNn2").toUri());
}
// Overridden test helper methods - values changed for HDFS and the
// additional mount point.
@Override
int getExpectedDirPaths() {
return 8;
}
@Override
int getExpectedMountPoints() {
return 9;
}
@Override
int getExpectedDelegationTokenCount() {
return 2; // Mount points to 2 unique hdfs
}
@Override
int getExpectedDelegationTokenCountWithCredentials() {
return 2;
}
}
| 4,290 | 29.870504 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
/**
* Verify XAttrs through ViewFileSystem functionality.
*/
public class TestViewFileSystemWithXAttrs {
private static MiniDFSCluster cluster;
private static Configuration clusterConf = new Configuration();
private static FileSystem fHdfs;
private static FileSystem fHdfs2;
private FileSystem fsView;
private Configuration fsViewConf;
private FileSystem fsTarget, fsTarget2;
private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2;
private FileSystemTestHelper fileSystemTestHelper =
new FileSystemTestHelper("/tmp/TestViewFileSystemWithXAttrs");
// XAttrs
protected static final String name1 = "user.a1";
protected static final byte[] value1 = {0x31, 0x32, 0x33};
protected static final String name2 = "user.a2";
protected static final byte[] value2 = {0x37, 0x38, 0x39};
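// value1 and value2 are the ASCII bytes for "123" and "789"; the "user."
// namespace is used because it is the XAttr namespace that ordinary
// (non-superuser) clients are normally allowed to set.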
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
cluster = new MiniDFSCluster.Builder(clusterConf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
.numDataNodes(2)
.build();
cluster.waitClusterUp();
fHdfs = cluster.getFileSystem(0);
fHdfs2 = cluster.getFileSystem(1);
}
@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
cluster.shutdown();
}
@Before
public void setUp() throws Exception {
fsTarget = fHdfs;
fsTarget2 = fHdfs2;
targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
targetTestRoot2 = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget2);
fsTarget.delete(targetTestRoot, true);
fsTarget2.delete(targetTestRoot2, true);
fsTarget.mkdirs(targetTestRoot);
fsTarget2.mkdirs(targetTestRoot2);
fsViewConf = ViewFileSystemTestSetup.createConfig();
setupMountPoints();
fsView = FileSystem.get(FsConstants.VIEWFS_URI, fsViewConf);
}
private void setupMountPoints() {
mountOnNn1 = new Path("/mountOnNn1");
mountOnNn2 = new Path("/mountOnNn2");
ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(),
targetTestRoot.toUri());
ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(),
targetTestRoot2.toUri());
}
@After
public void tearDown() throws Exception {
fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
fsTarget2.delete(fileSystemTestHelper.getTestRootPath(fsTarget2), true);
}
/**
* Verify a ViewFileSystem wrapped over multiple federated NameNodes will
* dispatch the XAttr operations to the correct NameNode.
*/
@Test
public void testXAttrOnMountEntry() throws Exception {
// Set XAttrs on the first namespace and verify they are correct
fsView.setXAttr(mountOnNn1, name1, value1);
fsView.setXAttr(mountOnNn1, name2, value2);
assertEquals(2, fsView.getXAttrs(mountOnNn1).size());
assertArrayEquals(value1, fsView.getXAttr(mountOnNn1, name1));
assertArrayEquals(value2, fsView.getXAttr(mountOnNn1, name2));
// Double-check by getting the XAttrs using FileSystem
// instead of ViewFileSystem
assertArrayEquals(value1, fHdfs.getXAttr(targetTestRoot, name1));
assertArrayEquals(value2, fHdfs.getXAttr(targetTestRoot, name2));
// Paranoid check: verify the other namespace does not
// have XAttrs set on the same path.
assertEquals(0, fsView.getXAttrs(mountOnNn2).size());
assertEquals(0, fHdfs2.getXAttrs(targetTestRoot2).size());
// Remove the XAttr entries on the first namespace
fsView.removeXAttr(mountOnNn1, name1);
fsView.removeXAttr(mountOnNn1, name2);
assertEquals(0, fsView.getXAttrs(mountOnNn1).size());
assertEquals(0, fHdfs.getXAttrs(targetTestRoot).size());
// Now set XAttrs on the second namespace
fsView.setXAttr(mountOnNn2, name1, value1);
fsView.setXAttr(mountOnNn2, name2, value2);
assertEquals(2, fsView.getXAttrs(mountOnNn2).size());
assertArrayEquals(value1, fsView.getXAttr(mountOnNn2, name1));
assertArrayEquals(value2, fsView.getXAttr(mountOnNn2, name2));
assertArrayEquals(value1, fHdfs2.getXAttr(targetTestRoot2, name1));
assertArrayEquals(value2, fHdfs2.getXAttr(targetTestRoot2, name2));
fsView.removeXAttr(mountOnNn2, name1);
fsView.removeXAttr(mountOnNn2, name2);
assertEquals(0, fsView.getXAttrs(mountOnNn2).size());
assertEquals(0, fHdfs2.getXAttrs(targetTestRoot2).size());
}
}
| 5,763 | 36.921053 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
/**
* Verify XAttrs through ViewFs functionality.
*/
public class TestViewFsWithXAttrs {
private static MiniDFSCluster cluster;
private static Configuration clusterConf = new Configuration();
private static FileContext fc, fc2;
private FileContext fcView, fcTarget, fcTarget2;
private Configuration fsViewConf;
private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2;
private FileContextTestHelper fileContextTestHelper =
new FileContextTestHelper("/tmp/TestViewFsWithXAttrs");
// XAttrs
protected static final String name1 = "user.a1";
protected static final byte[] value1 = {0x31, 0x32, 0x33};
protected static final String name2 = "user.a2";
protected static final byte[] value2 = {0x37, 0x38, 0x39};
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
cluster = new MiniDFSCluster.Builder(clusterConf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
.numDataNodes(2)
.build();
cluster.waitClusterUp();
fc = FileContext.getFileContext(cluster.getURI(0), clusterConf);
fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
}
@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
cluster.shutdown();
}
@Before
public void setUp() throws Exception {
fcTarget = fc;
fcTarget2 = fc2;
targetTestRoot = fileContextTestHelper.getAbsoluteTestRootPath(fc);
targetTestRoot2 = fileContextTestHelper.getAbsoluteTestRootPath(fc2);
fcTarget.delete(targetTestRoot, true);
fcTarget2.delete(targetTestRoot2, true);
fcTarget.mkdir(targetTestRoot, new FsPermission((short) 0750), true);
fcTarget2.mkdir(targetTestRoot2, new FsPermission((short) 0750), true);
fsViewConf = ViewFileSystemTestSetup.createConfig();
setupMountPoints();
fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, fsViewConf);
}
private void setupMountPoints() {
mountOnNn1 = new Path("/mountOnNn1");
mountOnNn2 = new Path("/mountOnNn2");
ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri());
ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
}
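// Same two-NameNode mount layout as TestViewFileSystemWithXAttrs, but exercised
// through the FileContext API (fcView) rather than FileSystem, so both client
// entry points into viewfs are covered.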
@After
public void tearDown() throws Exception {
fcTarget.delete(fileContextTestHelper.getTestRootPath(fcTarget), true);
fcTarget2.delete(fileContextTestHelper.getTestRootPath(fcTarget2), true);
}
/**
* Verify a ViewFs wrapped over multiple federated NameNodes will
* dispatch the XAttr operations to the correct NameNode.
*/
@Test
public void testXAttrOnMountEntry() throws Exception {
// Set XAttrs on the first namespace and verify they are correct
fcView.setXAttr(mountOnNn1, name1, value1);
fcView.setXAttr(mountOnNn1, name2, value2);
assertEquals(2, fcView.getXAttrs(mountOnNn1).size());
assertArrayEquals(value1, fcView.getXAttr(mountOnNn1, name1));
assertArrayEquals(value2, fcView.getXAttr(mountOnNn1, name2));
// Double-check by getting the XAttrs using FileContext
// instead of ViewFs
assertArrayEquals(value1, fc.getXAttr(targetTestRoot, name1));
assertArrayEquals(value2, fc.getXAttr(targetTestRoot, name2));
// Paranoid check: verify the other namespace does not
// have XAttrs set on the same path.
assertEquals(0, fcView.getXAttrs(mountOnNn2).size());
assertEquals(0, fc2.getXAttrs(targetTestRoot2).size());
// Remove the XAttr entries on the first namespace
fcView.removeXAttr(mountOnNn1, name1);
fcView.removeXAttr(mountOnNn1, name2);
assertEquals(0, fcView.getXAttrs(mountOnNn1).size());
assertEquals(0, fc.getXAttrs(targetTestRoot).size());
// Now set XAttrs on the second namespace
fcView.setXAttr(mountOnNn2, name1, value1);
fcView.setXAttr(mountOnNn2, name2, value2);
assertEquals(2, fcView.getXAttrs(mountOnNn2).size());
assertArrayEquals(value1, fcView.getXAttr(mountOnNn2, name1));
assertArrayEquals(value2, fcView.getXAttr(mountOnNn2, name2));
assertArrayEquals(value1, fc2.getXAttr(targetTestRoot2, name1));
assertArrayEquals(value2, fc2.getXAttr(targetTestRoot2, name2));
fcView.removeXAttr(mountOnNn2, name1);
fcView.removeXAttr(mountOnNn2, name2);
assertEquals(0, fcView.getXAttrs(mountOnNn2).size());
assertEquals(0, fc2.getXAttrs(targetTestRoot2).size());
}
}
| 5,825 | 38.100671 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
/**
* The FileStatus is serialized by MR when jobs are submitted.
* Since viewfs overlays ViewFsFileStatus on top of it, we ran into
* serialization problems. This test verifies the fix.
*/
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import javax.security.auth.login.LoginException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestViewFsFileStatusHdfs {
static final String testfilename = "/tmp/testFileStatusSerialziation";
static final String someFile = "/hdfstmp/someFileForTestGetFileChecksum";
private static final FileSystemTestHelper fileSystemTestHelper = new FileSystemTestHelper();
private static MiniDFSCluster cluster;
private static Path defaultWorkingDirectory;
private static final Configuration CONF = new Configuration();
private static FileSystem fHdfs;
private static FileSystem vfs;
@BeforeClass
public static void clusterSetupAtBegining() throws IOException,
LoginException, URISyntaxException {
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
cluster.waitClusterUp();
fHdfs = cluster.getFileSystem();
defaultWorkingDirectory = fHdfs.makeQualified( new Path("/user/" +
UserGroupInformation.getCurrentUser().getShortUserName()));
fHdfs.mkdirs(defaultWorkingDirectory);
// Set up the ViewFS instance used by all tests.
Configuration conf = ViewFileSystemTestSetup.createConfig();
ConfigUtil.addLink(conf, "/vfstmp", new URI(fHdfs.getUri() + "/hdfstmp"));
ConfigUtil.addLink(conf, "/tmp", new URI(fHdfs.getUri() + "/tmp"));
vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
assertEquals(ViewFileSystem.class, vfs.getClass());
}
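// /vfstmp and /tmp in the view are now backed by /hdfstmp and /tmp on the single
// HDFS namespace; a FileStatus fetched through vfs is therefore expected to be a
// ViewFsFileStatus wrapper, whose Writable round-trip is exercised below.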
@Test
public void testFileStatusSerialziation()
throws IOException, URISyntaxException {
long len = fileSystemTestHelper.createFile(fHdfs, testfilename);
FileStatus stat = vfs.getFileStatus(new Path(testfilename));
assertEquals(len, stat.getLen());
// check serialization/deserialization
DataOutputBuffer dob = new DataOutputBuffer();
stat.write(dob);
DataInputBuffer dib = new DataInputBuffer();
dib.reset(dob.getData(), 0, dob.getLength());
FileStatus deSer = new FileStatus();
deSer.readFields(dib);
assertEquals(len, deSer.getLen());
}
@Test
public void testGetFileChecksum() throws IOException, URISyntaxException {
// Create two different files in HDFS
fileSystemTestHelper.createFile(fHdfs, someFile);
fileSystemTestHelper.createFile(fHdfs, fileSystemTestHelper
.getTestRootPath(fHdfs, someFile + "other"), 1, 512);
// Get checksum through ViewFS
FileChecksum viewFSCheckSum = vfs.getFileChecksum(
new Path("/vfstmp/someFileForTestGetFileChecksum"));
// Get checksum through HDFS.
FileChecksum hdfsCheckSum = fHdfs.getFileChecksum(
new Path(someFile));
// Get checksum of different file in HDFS
FileChecksum otherHdfsFileCheckSum = fHdfs.getFileChecksum(
new Path(someFile+"other"));
// Checksums of the same file obtained through HDFS and ViewFS should match
assertEquals("HDFS and ViewFS checksums were not the same", viewFSCheckSum,
hdfsCheckSum);
// Checksum of different files should be different.
assertFalse("Some other HDFS file which should not have had the same " +
"checksum as viewFS did!", viewFSCheckSum.equals(otherHdfsFileCheckSum));
}
@AfterClass
public static void cleanup() throws IOException {
fHdfs.delete(new Path(testfilename), true);
fHdfs.delete(new Path(someFile), true);
fHdfs.delete(new Path(someFile + "other"), true);
}
}
| 5,099 | 39.15748 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.io.InputStream;
import java.io.StringWriter;
import java.lang.reflect.Method;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* This class tests the logic for displaying files in the binary formats
* supported by the Text command.
*/
public class TestHdfsTextCommand {
private static final String TEST_ROOT_DIR = "/build/test/data/testText";
private static final Path AVRO_FILENAME = new Path(TEST_ROOT_DIR, "weather.avro");
private static MiniDFSCluster cluster;
private static FileSystem fs;
@Before
public void setUp() throws IOException{
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
fs = cluster.getFileSystem();
}
@After
public void tearDown() throws IOException{
if(fs != null){
fs.close();
}
if(cluster != null){
cluster.shutdown();
}
}
/**
* Tests whether binary Avro data files are displayed correctly.
*/
@Test
public void testDisplayForAvroFiles() throws Exception {
// Create a small Avro data file on the HDFS.
createAvroFile(generateWeatherAvroBinaryData());
// Prepare and call the Text command's protected getInputStream method
// using reflection.
Configuration conf = fs.getConf();
PathData pathData = new PathData(AVRO_FILENAME.toString(), conf);
Display.Text text = new Display.Text();
text.setConf(conf);
Method method = text.getClass().getDeclaredMethod(
"getInputStream", PathData.class);
method.setAccessible(true);
InputStream stream = (InputStream) method.invoke(text, pathData);
String output = inputStreamToString(stream);
// Check the output.
String expectedOutput =
"{\"station\":\"011990-99999\",\"time\":-619524000000,\"temp\":0}" +
System.getProperty("line.separator") +
"{\"station\":\"011990-99999\",\"time\":-619506000000,\"temp\":22}" +
System.getProperty("line.separator") +
"{\"station\":\"011990-99999\",\"time\":-619484400000,\"temp\":-11}" +
System.getProperty("line.separator") +
"{\"station\":\"012650-99999\",\"time\":-655531200000,\"temp\":111}" +
System.getProperty("line.separator") +
"{\"station\":\"012650-99999\",\"time\":-655509600000,\"temp\":78}" +
System.getProperty("line.separator");
assertEquals(expectedOutput, output);
}
private String inputStreamToString(InputStream stream) throws IOException {
StringWriter writer = new StringWriter();
IOUtils.copy(stream, writer);
return writer.toString();
}
private void createAvroFile(byte[] contents) throws IOException {
FSDataOutputStream stream = fs.create(AVRO_FILENAME);
stream.write(contents);
stream.close();
assertTrue(fs.exists(AVRO_FILENAME));
}
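// The byte array below is a complete Avro object container file for the
// "test.Weather" schema embedded in it. If the data ever needs to be
// regenerated, a rough sketch using the Avro Java API (assuming Avro is on the
// test classpath; avroFile stands in for any java.io.File target) would be:
//
//   Schema schema = new Schema.Parser().parse(
//       "{\"type\":\"record\",\"name\":\"Weather\",\"namespace\":\"test\","
//       + "\"fields\":[{\"name\":\"station\",\"type\":\"string\"},"
//       + "{\"name\":\"time\",\"type\":\"long\"},"
//       + "{\"name\":\"temp\",\"type\":\"int\"}],"
//       + "\"doc\":\"A weather reading.\"}");
//   GenericRecord rec = new GenericData.Record(schema);
//   rec.put("station", "011990-99999");
//   rec.put("time", -619524000000L);
//   rec.put("temp", 0);
//   DataFileWriter<GenericRecord> writer =
//       new DataFileWriter<>(new GenericDatumWriter<GenericRecord>(schema));
//   writer.create(schema, avroFile);
//   writer.append(rec);   // append the remaining records the same way
//   writer.close();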
private byte[] generateWeatherAvroBinaryData() {
// The contents of a simple binary Avro file with weather records.
byte[] contents = {
(byte) 0x4f, (byte) 0x62, (byte) 0x6a, (byte) 0x1,
(byte) 0x4, (byte) 0x14, (byte) 0x61, (byte) 0x76,
(byte) 0x72, (byte) 0x6f, (byte) 0x2e, (byte) 0x63,
(byte) 0x6f, (byte) 0x64, (byte) 0x65, (byte) 0x63,
(byte) 0x8, (byte) 0x6e, (byte) 0x75, (byte) 0x6c,
(byte) 0x6c, (byte) 0x16, (byte) 0x61, (byte) 0x76,
(byte) 0x72, (byte) 0x6f, (byte) 0x2e, (byte) 0x73,
(byte) 0x63, (byte) 0x68, (byte) 0x65, (byte) 0x6d,
(byte) 0x61, (byte) 0xf2, (byte) 0x2, (byte) 0x7b,
(byte) 0x22, (byte) 0x74, (byte) 0x79, (byte) 0x70,
(byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
(byte) 0x72, (byte) 0x65, (byte) 0x63, (byte) 0x6f,
(byte) 0x72, (byte) 0x64, (byte) 0x22, (byte) 0x2c,
(byte) 0x22, (byte) 0x6e, (byte) 0x61, (byte) 0x6d,
(byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
(byte) 0x57, (byte) 0x65, (byte) 0x61, (byte) 0x74,
(byte) 0x68, (byte) 0x65, (byte) 0x72, (byte) 0x22,
(byte) 0x2c, (byte) 0x22, (byte) 0x6e, (byte) 0x61,
(byte) 0x6d, (byte) 0x65, (byte) 0x73, (byte) 0x70,
(byte) 0x61, (byte) 0x63, (byte) 0x65, (byte) 0x22,
(byte) 0x3a, (byte) 0x22, (byte) 0x74, (byte) 0x65,
(byte) 0x73, (byte) 0x74, (byte) 0x22, (byte) 0x2c,
(byte) 0x22, (byte) 0x66, (byte) 0x69, (byte) 0x65,
(byte) 0x6c, (byte) 0x64, (byte) 0x73, (byte) 0x22,
(byte) 0x3a, (byte) 0x5b, (byte) 0x7b, (byte) 0x22,
(byte) 0x6e, (byte) 0x61, (byte) 0x6d, (byte) 0x65,
(byte) 0x22, (byte) 0x3a, (byte) 0x22, (byte) 0x73,
(byte) 0x74, (byte) 0x61, (byte) 0x74, (byte) 0x69,
(byte) 0x6f, (byte) 0x6e, (byte) 0x22, (byte) 0x2c,
(byte) 0x22, (byte) 0x74, (byte) 0x79, (byte) 0x70,
(byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
(byte) 0x73, (byte) 0x74, (byte) 0x72, (byte) 0x69,
(byte) 0x6e, (byte) 0x67, (byte) 0x22, (byte) 0x7d,
(byte) 0x2c, (byte) 0x7b, (byte) 0x22, (byte) 0x6e,
(byte) 0x61, (byte) 0x6d, (byte) 0x65, (byte) 0x22,
(byte) 0x3a, (byte) 0x22, (byte) 0x74, (byte) 0x69,
(byte) 0x6d, (byte) 0x65, (byte) 0x22, (byte) 0x2c,
(byte) 0x22, (byte) 0x74, (byte) 0x79, (byte) 0x70,
(byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
(byte) 0x6c, (byte) 0x6f, (byte) 0x6e, (byte) 0x67,
(byte) 0x22, (byte) 0x7d, (byte) 0x2c, (byte) 0x7b,
(byte) 0x22, (byte) 0x6e, (byte) 0x61, (byte) 0x6d,
(byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
(byte) 0x74, (byte) 0x65, (byte) 0x6d, (byte) 0x70,
(byte) 0x22, (byte) 0x2c, (byte) 0x22, (byte) 0x74,
(byte) 0x79, (byte) 0x70, (byte) 0x65, (byte) 0x22,
(byte) 0x3a, (byte) 0x22, (byte) 0x69, (byte) 0x6e,
(byte) 0x74, (byte) 0x22, (byte) 0x7d, (byte) 0x5d,
(byte) 0x2c, (byte) 0x22, (byte) 0x64, (byte) 0x6f,
(byte) 0x63, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
(byte) 0x41, (byte) 0x20, (byte) 0x77, (byte) 0x65,
(byte) 0x61, (byte) 0x74, (byte) 0x68, (byte) 0x65,
(byte) 0x72, (byte) 0x20, (byte) 0x72, (byte) 0x65,
(byte) 0x61, (byte) 0x64, (byte) 0x69, (byte) 0x6e,
(byte) 0x67, (byte) 0x2e, (byte) 0x22, (byte) 0x7d,
(byte) 0x0, (byte) 0xb0, (byte) 0x81, (byte) 0xb3,
(byte) 0xc4, (byte) 0xa, (byte) 0xc, (byte) 0xf6,
(byte) 0x62, (byte) 0xfa, (byte) 0xc9, (byte) 0x38,
(byte) 0xfd, (byte) 0x7e, (byte) 0x52, (byte) 0x0,
(byte) 0xa7, (byte) 0xa, (byte) 0xcc, (byte) 0x1,
(byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x31,
(byte) 0x39, (byte) 0x39, (byte) 0x30, (byte) 0x2d,
(byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
(byte) 0x39, (byte) 0xff, (byte) 0xa3, (byte) 0x90,
(byte) 0xe8, (byte) 0x87, (byte) 0x24, (byte) 0x0,
(byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x31,
(byte) 0x39, (byte) 0x39, (byte) 0x30, (byte) 0x2d,
(byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
(byte) 0x39, (byte) 0xff, (byte) 0x81, (byte) 0xfb,
(byte) 0xd6, (byte) 0x87, (byte) 0x24, (byte) 0x2c,
(byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x31,
(byte) 0x39, (byte) 0x39, (byte) 0x30, (byte) 0x2d,
(byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
(byte) 0x39, (byte) 0xff, (byte) 0xa5, (byte) 0xae,
(byte) 0xc2, (byte) 0x87, (byte) 0x24, (byte) 0x15,
(byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x32,
(byte) 0x36, (byte) 0x35, (byte) 0x30, (byte) 0x2d,
(byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
(byte) 0x39, (byte) 0xff, (byte) 0xb7, (byte) 0xa2,
(byte) 0x8b, (byte) 0x94, (byte) 0x26, (byte) 0xde,
(byte) 0x1, (byte) 0x18, (byte) 0x30, (byte) 0x31,
(byte) 0x32, (byte) 0x36, (byte) 0x35, (byte) 0x30,
(byte) 0x2d, (byte) 0x39, (byte) 0x39, (byte) 0x39,
(byte) 0x39, (byte) 0x39, (byte) 0xff, (byte) 0xdb,
(byte) 0xd5, (byte) 0xf6, (byte) 0x93, (byte) 0x26,
(byte) 0x9c, (byte) 0x1, (byte) 0xb0, (byte) 0x81,
(byte) 0xb3, (byte) 0xc4, (byte) 0xa, (byte) 0xc,
(byte) 0xf6, (byte) 0x62, (byte) 0xfa, (byte) 0xc9,
(byte) 0x38, (byte) 0xfd, (byte) 0x7e, (byte) 0x52,
(byte) 0x0, (byte) 0xa7,
};
return contents;
}
}
| 9,509 | 43.027778 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSeek.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.io.IOException;
/**
* Test seek operations on HDFS.
*/
public class TestHDFSContractSeek extends AbstractContractSeekTest {
@BeforeClass
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
@AfterClass
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new HDFSContract(conf);
}
}
| 1,587 | 30.76 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractCreate.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.io.IOException;
public class TestHDFSContractCreate extends AbstractContractCreateTest {
@BeforeClass
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
@AfterClass
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new HDFSContract(conf);
}
}
| 1,542 | 32.543478 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractConcat.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractConcatTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.io.IOException;
/**
* Test concat operations on HDFS.
*/
public class TestHDFSContractConcat extends AbstractContractConcatTest {
@BeforeClass
public static void createCluster() throws IOException {
HDFSContract.createCluster();
// perform a simple operation on the cluster to verify it is up
HDFSContract.getCluster().getFileSystem().getDefaultBlockSize();
}
@AfterClass
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new HDFSContract(conf);
}
}
| 1,729 | 32.921569 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMkdir.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.io.IOException;
/**
* Test directory (mkdir) operations on HDFS.
*/
public class TestHDFSContractMkdir extends AbstractContractMkdirTest {
@BeforeClass
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
@AfterClass
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new HDFSContract(conf);
}
}
| 1,589 | 31.44898 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractOpen.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.io.IOException;
/**
* Test open operations on HDFS.
*/
public class TestHDFSContractOpen extends AbstractContractOpenTest {
@BeforeClass
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
@AfterClass
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new HDFSContract(conf);
}
}
| 1,586 | 31.387755 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.fs.contract.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.io.IOException;
public class TestHDFSContractAppend extends AbstractContractAppendTest {
@BeforeClass
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
@AfterClass
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new HDFSContract(conf);
}
}
| 1,329 | 30.666667 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRootDirectory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.io.IOException;
/**
* Test root directory operations on HDFS.
*/
public class TestHDFSContractRootDirectory extends
AbstractContractRootDirectoryTest {
@BeforeClass
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
@AfterClass
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new HDFSContract(conf);
}
}
| 1,617 | 31.36 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/HDFSContract.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
import org.apache.hadoop.fs.contract.ContractOptions;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Assert;
import java.io.IOException;
/**
 * The contract of HDFS.
 * Its feature set can change from platform to platform; the default
 * set is updated during initialization.
*/
public class HDFSContract extends AbstractFSContract {
public static final String CONTRACT_HDFS_XML = "contract/hdfs.xml";
public static final int BLOCK_SIZE = AbstractFSContractTestBase.TEST_FILE_LEN;
private static MiniDFSCluster cluster;
public HDFSContract(Configuration conf) {
super(conf);
//insert the base features
addConfResource(CONTRACT_HDFS_XML);
}
public static void createCluster() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
conf.addResource(CONTRACT_HDFS_XML);
//hack in a small block size (BLOCK_SIZE == TEST_FILE_LEN) for the contract tests
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.waitClusterUp();
}
public static void destroyCluster() throws IOException {
if (cluster != null) {
cluster.shutdown();
}
}
public static MiniDFSCluster getCluster() {
return cluster;
}
@Override
public void init() throws IOException {
super.init();
Assert.assertTrue("contract options not loaded",
isSupported(ContractOptions.IS_CASE_SENSITIVE, false));
}
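// For reference, the options checked in init() are ordinary Hadoop Configuration
// properties loaded from CONTRACT_HDFS_XML; an entry is assumed to look roughly like
//   <property>
//     <name>fs.contract.is-case-sensitive</name>
//     <value>true</value>
//   </property>
// (key name given only as an illustrative sketch of the fs.contract.* naming convention).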
@Override
public FileSystem getTestFileSystem() throws IOException {
//assumes cluster is not null
Assert.assertNotNull("cluster not created", cluster);
return cluster.getFileSystem();
}
@Override
public String getScheme() {
return "hdfs";
}
@Override
public Path getTestPath() {
Path path = new Path("/test");
return path;
}
}
| 3,025 | 30.195876 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractDelete.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.io.IOException;
/**
 * Test delete operations on HDFS.
*/
public class TestHDFSContractDelete extends AbstractContractDeleteTest {
@BeforeClass
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
@AfterClass
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new HDFSContract(conf);
}
}
| 1,592 | 31.510204 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRename.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.io.IOException;
public class TestHDFSContractRename extends AbstractContractRenameTest {
@BeforeClass
public static void createCluster() throws IOException {
HDFSContract.createCluster();
}
@AfterClass
public static void teardownCluster() throws IOException {
HDFSContract.destroyCluster();
}
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new HDFSContract(conf);
}
}
| 1,542 | 32.543478 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestNetworkTopology {
private static final Log LOG = LogFactory.getLog(TestNetworkTopology.class);
private final static NetworkTopology cluster = new NetworkTopology();
private DatanodeDescriptor dataNodes[];
@Before
public void setupDatanodes() {
dataNodes = new DatanodeDescriptor[] {
DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d1/r2"),
DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3"),
DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3"),
DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r3"),
DFSTestUtil.getDatanodeDescriptor("9.9.9.9", "/d3/r1"),
DFSTestUtil.getDatanodeDescriptor("10.10.10.10", "/d3/r1"),
DFSTestUtil.getDatanodeDescriptor("11.11.11.11", "/d3/r1"),
DFSTestUtil.getDatanodeDescriptor("12.12.12.12", "/d3/r2"),
DFSTestUtil.getDatanodeDescriptor("13.13.13.13", "/d3/r2"),
DFSTestUtil.getDatanodeDescriptor("14.14.14.14", "/d4/r1"),
DFSTestUtil.getDatanodeDescriptor("15.15.15.15", "/d4/r1"),
DFSTestUtil.getDatanodeDescriptor("16.16.16.16", "/d4/r1"),
DFSTestUtil.getDatanodeDescriptor("17.17.17.17", "/d4/r1"),
DFSTestUtil.getDatanodeDescriptor("18.18.18.18", "/d4/r1"),
DFSTestUtil.getDatanodeDescriptor("19.19.19.19", "/d4/r1"),
DFSTestUtil.getDatanodeDescriptor("20.20.20.20", "/d4/r1"),
};
for (int i = 0; i < dataNodes.length; i++) {
cluster.add(dataNodes[i]);
}
dataNodes[9].setDecommissioned();
dataNodes[10].setDecommissioned();
}
@Test
public void testContains() throws Exception {
DatanodeDescriptor nodeNotInMap =
DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r4");
for (int i=0; i < dataNodes.length; i++) {
assertTrue(cluster.contains(dataNodes[i]));
}
assertFalse(cluster.contains(nodeNotInMap));
}
@Test
public void testNumOfChildren() throws Exception {
assertEquals(cluster.getNumOfLeaves(), dataNodes.length);
}
@Test
public void testCreateInvalidTopology() throws Exception {
NetworkTopology invalCluster = new NetworkTopology();
DatanodeDescriptor invalDataNodes[] = new DatanodeDescriptor[] {
DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1")
};
invalCluster.add(invalDataNodes[0]);
invalCluster.add(invalDataNodes[1]);
try {
invalCluster.add(invalDataNodes[2]);
fail("expected InvalidTopologyException");
} catch (NetworkTopology.InvalidTopologyException e) {
assertTrue(e.getMessage().startsWith("Failed to add "));
assertTrue(e.getMessage().contains(
"You cannot have a rack and a non-rack node at the same " +
"level of the network topology."));
}
}
@Test
public void testRacks() throws Exception {
assertEquals(cluster.getNumOfRacks(), 6);
assertTrue(cluster.isOnSameRack(dataNodes[0], dataNodes[1]));
assertFalse(cluster.isOnSameRack(dataNodes[1], dataNodes[2]));
assertTrue(cluster.isOnSameRack(dataNodes[2], dataNodes[3]));
assertTrue(cluster.isOnSameRack(dataNodes[3], dataNodes[4]));
assertFalse(cluster.isOnSameRack(dataNodes[4], dataNodes[5]));
assertTrue(cluster.isOnSameRack(dataNodes[5], dataNodes[6]));
}
@Test
public void testGetDistance() throws Exception {
assertEquals(cluster.getDistance(dataNodes[0], dataNodes[0]), 0);
assertEquals(cluster.getDistance(dataNodes[0], dataNodes[1]), 2);
assertEquals(cluster.getDistance(dataNodes[0], dataNodes[3]), 4);
assertEquals(cluster.getDistance(dataNodes[0], dataNodes[6]), 6);
}
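// getDistance counts the edges on the path between two leaves of the topology tree,
// so with the /datacenter/rack/node layout used here the expected values are
// 0 (same node), 2 (same rack), 4 (same data center, different rack) and
// 6 (different data centers), which is what the assertions above check.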
@Test
public void testSortByDistance() throws Exception {
DatanodeDescriptor[] testNodes = new DatanodeDescriptor[3];
// array contains both local node & local rack node
testNodes[0] = dataNodes[1];
testNodes[1] = dataNodes[2];
testNodes[2] = dataNodes[0];
cluster.setRandomSeed(0xDEADBEEF);
cluster.sortByDistance(dataNodes[0], testNodes, testNodes.length);
assertTrue(testNodes[0] == dataNodes[0]);
assertTrue(testNodes[1] == dataNodes[1]);
assertTrue(testNodes[2] == dataNodes[2]);
// array contains both local node & local rack node & decommissioned node
DatanodeDescriptor[] dtestNodes = new DatanodeDescriptor[5];
dtestNodes[0] = dataNodes[8];
dtestNodes[1] = dataNodes[12];
dtestNodes[2] = dataNodes[11];
dtestNodes[3] = dataNodes[9];
dtestNodes[4] = dataNodes[10];
cluster.setRandomSeed(0xDEADBEEF);
cluster.sortByDistance(dataNodes[8], dtestNodes, dtestNodes.length - 2);
assertTrue(dtestNodes[0] == dataNodes[8]);
assertTrue(dtestNodes[1] == dataNodes[11]);
assertTrue(dtestNodes[2] == dataNodes[12]);
assertTrue(dtestNodes[3] == dataNodes[9]);
assertTrue(dtestNodes[4] == dataNodes[10]);
// array contains local node
testNodes[0] = dataNodes[1];
testNodes[1] = dataNodes[3];
testNodes[2] = dataNodes[0];
cluster.setRandomSeed(0xDEADBEEF);
cluster.sortByDistance(dataNodes[0], testNodes, testNodes.length);
assertTrue(testNodes[0] == dataNodes[0]);
assertTrue(testNodes[1] == dataNodes[1]);
assertTrue(testNodes[2] == dataNodes[3]);
// array contains local rack node
testNodes[0] = dataNodes[5];
testNodes[1] = dataNodes[3];
testNodes[2] = dataNodes[1];
cluster.setRandomSeed(0xDEADBEEF);
cluster.sortByDistance(dataNodes[0], testNodes, testNodes.length);
assertTrue(testNodes[0] == dataNodes[1]);
assertTrue(testNodes[1] == dataNodes[3]);
assertTrue(testNodes[2] == dataNodes[5]);
// array contains local rack node which happens to be in position 0
testNodes[0] = dataNodes[1];
testNodes[1] = dataNodes[5];
testNodes[2] = dataNodes[3];
cluster.setRandomSeed(0xDEADBEEF);
cluster.sortByDistance(dataNodes[0], testNodes, testNodes.length);
assertTrue(testNodes[0] == dataNodes[1]);
assertTrue(testNodes[1] == dataNodes[3]);
assertTrue(testNodes[2] == dataNodes[5]);
// Same as previous, but with a different random seed to test randomization
testNodes[0] = dataNodes[1];
testNodes[1] = dataNodes[5];
testNodes[2] = dataNodes[3];
cluster.setRandomSeed(0xDEAD);
cluster.sortByDistance(dataNodes[0], testNodes, testNodes.length);
// sortByDistance does not take the "data center" layer into consideration
// and it doesn't sort by getDistance, so 1, 5, 3 is also valid here
assertTrue(testNodes[0] == dataNodes[1]);
assertTrue(testNodes[1] == dataNodes[5]);
assertTrue(testNodes[2] == dataNodes[3]);
// Array of just rack-local nodes
// Expect a random first node
DatanodeDescriptor first = null;
boolean foundRandom = false;
for (int i=5; i<=7; i++) {
testNodes[0] = dataNodes[5];
testNodes[1] = dataNodes[6];
testNodes[2] = dataNodes[7];
cluster.sortByDistance(dataNodes[i], testNodes, testNodes.length);
if (first == null) {
first = testNodes[0];
} else {
if (first != testNodes[0]) {
foundRandom = true;
break;
}
}
}
assertTrue("Expected to find a different first location", foundRandom);
// Array of just remote nodes
// Expect random first node
first = null;
for (int i = 1; i <= 4; i++) {
testNodes[0] = dataNodes[13];
testNodes[1] = dataNodes[14];
testNodes[2] = dataNodes[15];
cluster.sortByDistance(dataNodes[i], testNodes, testNodes.length);
if (first == null) {
first = testNodes[0];
} else {
if (first != testNodes[0]) {
foundRandom = true;
break;
}
}
}
assertTrue("Expected to find a different first location", foundRandom);
}
@Test
public void testRemove() throws Exception {
for(int i=0; i<dataNodes.length; i++) {
cluster.remove(dataNodes[i]);
}
for(int i=0; i<dataNodes.length; i++) {
assertFalse(cluster.contains(dataNodes[i]));
}
assertEquals(0, cluster.getNumOfLeaves());
for(int i=0; i<dataNodes.length; i++) {
cluster.add(dataNodes[i]);
}
}
/**
* This picks a large number of nodes at random in order to ensure coverage
*
* @param numNodes the number of nodes
* @param excludedScope the excluded scope
* @return the frequency that nodes were chosen
*/
private Map<Node, Integer> pickNodesAtRandom(int numNodes,
String excludedScope) {
Map<Node, Integer> frequency = new HashMap<Node, Integer>();
for (DatanodeDescriptor dnd : dataNodes) {
frequency.put(dnd, 0);
}
for (int j = 0; j < numNodes; j++) {
Node random = cluster.chooseRandom(excludedScope);
frequency.put(random, frequency.get(random) + 1);
}
return frequency;
}
/**
* This test checks that chooseRandom works for an excluded node.
*/
@Test
public void testChooseRandomExcludedNode() {
String scope = "~" + NodeBase.getPath(dataNodes[0]);
Map<Node, Integer> frequency = pickNodesAtRandom(100, scope);
for (Node key : dataNodes) {
// every node except the excluded first node should have been chosen at least once
assertTrue(frequency.get(key) > 0 || key == dataNodes[0]);
}
}
/**
* This test checks that chooseRandom works for an excluded rack.
*/
@Test
public void testChooseRandomExcludedRack() {
Map<Node, Integer> frequency = pickNodesAtRandom(100, "~" + "/d2");
// nodes under the excluded /d2 scope should never be chosen; all others should be
for (int j = 0; j < dataNodes.length; j++) {
int freq = frequency.get(dataNodes[j]);
if (dataNodes[j].getNetworkLocation().startsWith("/d2")) {
assertEquals(0, freq);
} else {
assertTrue(freq > 0);
}
}
}
@Test(timeout=180000)
public void testInvalidNetworkTopologiesNotCachedInHdfs() throws Exception {
// start a cluster
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
// bad rack topology
String racks[] = { "/a/b", "/c" };
String hosts[] = { "foo1.example.com", "foo2.example.com" };
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).
racks(racks).hosts(hosts).build();
cluster.waitActive();
NamenodeProtocols nn = cluster.getNameNodeRpc();
Assert.assertNotNull(nn);
// Wait for one DataNode to register.
// The other DataNode will not be able to register because of the rack mismatch.
DatanodeInfo[] info;
while (true) {
info = nn.getDatanodeReport(DatanodeReportType.LIVE);
Assert.assertFalse(info.length == 2);
if (info.length == 1) {
break;
}
Thread.sleep(1000);
}
// Set the network topology of the other node to match the network
// topology of the node that came up.
int validIdx = info[0].getHostName().equals(hosts[0]) ? 0 : 1;
int invalidIdx = validIdx == 1 ? 0 : 1;
StaticMapping.addNodeToRack(hosts[invalidIdx], racks[validIdx]);
LOG.info("datanode " + validIdx + " came up with network location " +
info[0].getNetworkLocation());
// Restart the DN with the invalid topology and wait for it to register.
cluster.restartDataNode(invalidIdx);
Thread.sleep(5000);
while (true) {
info = nn.getDatanodeReport(DatanodeReportType.LIVE);
if (info.length == 2) {
break;
}
if (info.length == 0) {
LOG.info("got no valid DNs");
} else if (info.length == 1) {
LOG.info("got one valid DN: " + info[0].getHostName() +
" (at " + info[0].getNetworkLocation() + ")");
}
Thread.sleep(1000);
}
Assert.assertEquals(info[0].getNetworkLocation(),
info[1].getNetworkLocation());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 13,959 | 36.526882 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tracing;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.junit.Assert;
import org.junit.Test;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.PrintStream;
public class TestTraceAdmin {
private static final String NEWLINE = System.getProperty("line.separator");
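// Runs a TraceAdmin command with System.out and System.err redirected into a buffer,
// returning "ret:<exit code>, <captured output>" so tests can assert on the CLI output.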
private String runTraceCommand(TraceAdmin trace, String... cmd)
throws Exception {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream ps = new PrintStream(baos);
PrintStream oldStdout = System.out;
PrintStream oldStderr = System.err;
System.setOut(ps);
System.setErr(ps);
int ret = -1;
try {
ret = trace.run(cmd);
} finally {
System.out.flush();
System.setOut(oldStdout);
System.setErr(oldStderr);
}
return "ret:" + ret + ", " + baos.toString();
}
private String getHostPortForNN(MiniDFSCluster cluster) {
return "127.0.0.1:" + cluster.getNameNodePort();
}
@Test
public void testCreateAndDestroySpanReceiver() throws Exception {
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX +
SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX, "");
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
TemporarySocketDirectory tempDir = new TemporarySocketDirectory();
String tracePath =
new File(tempDir.getDir(), "tracefile").getAbsolutePath();
try {
TraceAdmin trace = new TraceAdmin();
trace.setConf(conf);
Assert.assertEquals("ret:0, [no span receivers found]" + NEWLINE,
runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster)));
Assert.assertEquals("ret:0, Added trace span receiver 1 with " +
"configuration dfs.htrace.local-file-span-receiver.path = " + tracePath + NEWLINE,
runTraceCommand(trace, "-add", "-host", getHostPortForNN(cluster),
"-class", "org.apache.htrace.impl.LocalFileSpanReceiver",
"-Cdfs.htrace.local-file-span-receiver.path=" + tracePath));
String list =
runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster));
Assert.assertTrue(list.startsWith("ret:0"));
Assert.assertTrue(list.contains("1 org.apache.htrace.impl.LocalFileSpanReceiver"));
Assert.assertEquals("ret:0, Removed trace span receiver 1" + NEWLINE,
runTraceCommand(trace, "-remove", "1", "-host",
getHostPortForNN(cluster)));
Assert.assertEquals("ret:0, [no span receivers found]" + NEWLINE,
runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster)));
Assert.assertEquals("ret:0, Added trace span receiver 2 with " +
"configuration dfs.htrace.local-file-span-receiver.path = " + tracePath + NEWLINE,
runTraceCommand(trace, "-add", "-host", getHostPortForNN(cluster),
"-class", "LocalFileSpanReceiver",
"-Cdfs.htrace.local-file-span-receiver.path=" + tracePath));
Assert.assertEquals("ret:0, Removed trace span receiver 2" + NEWLINE,
runTraceCommand(trace, "-remove", "2", "-host",
getHostPortForNN(cluster)));
} finally {
cluster.shutdown();
tempDir.close();
}
}
}
| 4,308 | 41.245098 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracingShortCircuitLocalRead.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tracing;
import static org.junit.Assume.assumeTrue;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestTracingShortCircuitLocalRead {
private static Configuration conf;
private static MiniDFSCluster cluster;
private static DistributedFileSystem dfs;
private static SpanReceiverHost spanReceiverHost;
private static TemporarySocketDirectory sockDir;
static final Path TEST_PATH = new Path("testShortCircuitTraceHooks");
static final int TEST_LENGTH = 1234;
@BeforeClass
public static void init() {
sockDir = new TemporarySocketDirectory();
DomainSocket.disableBindPathValidation();
}
@AfterClass
public static void shutdown() throws IOException {
sockDir.close();
}
@Test
public void testShortCircuitTraceHooks() throws IOException {
assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
conf = new Configuration();
conf.set(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
SetSpanReceiver.class.getName());
conf.setLong("dfs.blocksize", 100 * 1024);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, false);
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
"testShortCircuitTraceHooks._PORT");
conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "CRC32C");
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1)
.build();
dfs = cluster.getFileSystem();
try {
DFSTestUtil.createFile(dfs, TEST_PATH, TEST_LENGTH, (short)1, 5678L);
TraceScope ts = Trace.startSpan("testShortCircuitTraceHooks", Sampler.ALWAYS);
FSDataInputStream stream = dfs.open(TEST_PATH);
byte buf[] = new byte[TEST_LENGTH];
IOUtils.readFully(stream, buf, 0, TEST_LENGTH);
stream.close();
ts.close();
String[] expectedSpanNames = {
"OpRequestShortCircuitAccessProto",
"ShortCircuitShmRequestProto"
};
SetSpanReceiver.assertSpanNamesFound(expectedSpanNames);
} finally {
dfs.close();
cluster.shutdown();
}
}
}
| 3,730 | 35.578431 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tracing;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.htrace.Sampler;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
public class TestTracing {
private static Configuration conf;
private static MiniDFSCluster cluster;
private static DistributedFileSystem dfs;
@Test
public void testTracing() throws Exception {
// write and read without tracing started
String fileName = "testTracingDisabled.dat";
writeTestFile(fileName);
Assert.assertTrue(SetSpanReceiver.size() == 0);
readTestFile(fileName);
Assert.assertTrue(SetSpanReceiver.size() == 0);
writeWithTracing();
readWithTracing();
}
public void writeWithTracing() throws Exception {
long startTime = System.currentTimeMillis();
TraceScope ts = Trace.startSpan("testWriteTraceHooks", Sampler.ALWAYS);
writeTestFile("testWriteTraceHooks.dat");
long endTime = System.currentTimeMillis();
ts.close();
String[] expectedSpanNames = {
"testWriteTraceHooks",
"ClientProtocol#create",
"ClientNamenodeProtocol#create",
"ClientProtocol#fsync",
"ClientNamenodeProtocol#fsync",
"ClientProtocol#complete",
"ClientNamenodeProtocol#complete",
"newStreamForCreate",
"DFSOutputStream#write",
"DFSOutputStream#close",
"dataStreamer",
"OpWriteBlockProto",
"ClientProtocol#addBlock",
"ClientNamenodeProtocol#addBlock"
};
SetSpanReceiver.assertSpanNamesFound(expectedSpanNames);
// The trace should last about the same amount of time as the test
Map<String, List<Span>> map = SetSpanReceiver.getMap();
Span s = map.get("testWriteTraceHooks").get(0);
Assert.assertNotNull(s);
long spanStart = s.getStartTimeMillis();
long spanEnd = s.getStopTimeMillis();
// Spans homed in the top trace should have the same trace id.
// Spans having multiple parents (e.g. "dataStreamer" added by HDFS-7054)
// and their children are exceptions.
String[] spansInTopTrace = {
"testWriteTraceHooks",
"ClientProtocol#create",
"ClientNamenodeProtocol#create",
"ClientProtocol#fsync",
"ClientNamenodeProtocol#fsync",
"ClientProtocol#complete",
"ClientNamenodeProtocol#complete",
"newStreamForCreate",
"DFSOutputStream#write",
"DFSOutputStream#close",
};
for (String desc : spansInTopTrace) {
for (Span span : map.get(desc)) {
Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
}
}
// test for timeline annotation added by HADOOP-11242
Assert.assertEquals("called",
map.get("ClientProtocol#create")
.get(0).getTimelineAnnotations()
.get(0).getMessage());
SetSpanReceiver.clear();
}
public void readWithTracing() throws Exception {
String fileName = "testReadTraceHooks.dat";
writeTestFile(fileName);
long startTime = System.currentTimeMillis();
TraceScope ts = Trace.startSpan("testReadTraceHooks", Sampler.ALWAYS);
readTestFile(fileName);
ts.close();
long endTime = System.currentTimeMillis();
String[] expectedSpanNames = {
"testReadTraceHooks",
"ClientProtocol#getBlockLocations",
"ClientNamenodeProtocol#getBlockLocations",
"OpReadBlockProto"
};
SetSpanReceiver.assertSpanNamesFound(expectedSpanNames);
// The trace should last about the same amount of time as the test
Map<String, List<Span>> map = SetSpanReceiver.getMap();
Span s = map.get("testReadTraceHooks").get(0);
Assert.assertNotNull(s);
long spanStart = s.getStartTimeMillis();
long spanEnd = s.getStopTimeMillis();
Assert.assertTrue(spanStart - startTime < 100);
Assert.assertTrue(spanEnd - endTime < 100);
// There should only be one trace id as it should all be homed in the
// top trace.
for (Span span : SetSpanReceiver.getSpans()) {
Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
}
SetSpanReceiver.clear();
}
private void writeTestFile(String testFileName) throws Exception {
Path filePath = new Path(testFileName);
FSDataOutputStream stream = dfs.create(filePath);
for (int i = 0; i < 10; i++) {
byte[] data = RandomStringUtils.randomAlphabetic(102400).getBytes();
stream.write(data);
}
stream.hsync();
stream.close();
}
private void readTestFile(String testFileName) throws Exception {
Path filePath = new Path(testFileName);
FSDataInputStream istream = dfs.open(filePath, 10240);
ByteBuffer buf = ByteBuffer.allocate(10240);
int count = 0;
try {
while (istream.read(buf) > 0) {
count += 1;
buf.clear();
istream.seek(istream.getPos() + 5);
}
} catch (IOException ioe) {
// Ignore this; it's probably a seek past the end of the file.
} finally {
istream.close();
}
}
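// SetSpanReceiver is an in-memory span receiver used only by these tests: setup()
// below registers it as the DFS client's span receiver, so every span emitted during
// a test is captured in a collection that the assertions above can inspect.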
@BeforeClass
public static void setup() throws IOException {
conf = new Configuration();
conf.setLong("dfs.blocksize", 100 * 1024);
conf.set(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
SetSpanReceiver.class.getName());
}
@Before
public void startCluster() throws IOException {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3)
.build();
cluster.waitActive();
dfs = cluster.getFileSystem();
SetSpanReceiver.clear();
}
@After
public void shutDown() throws IOException {
cluster.shutdown();
}
}
| 6,953 | 31.647887 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
/**
* This class is used to specify the setup of namenodes when instantiating
* a MiniDFSCluster. It consists of a set of nameservices, each of which
 * may have one or more namenodes (in the case of HA).
*/
@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "Hive", "MapReduce", "Pig"})
@InterfaceStability.Unstable
public class MiniDFSNNTopology {
private final List<NSConf> nameservices = Lists.newArrayList();
private boolean federation;
public MiniDFSNNTopology() {
}
/**
* Set up a simple non-federated non-HA NN.
*/
public static MiniDFSNNTopology simpleSingleNN(
int nameNodePort, int nameNodeHttpPort) {
return new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf(null)
.addNN(new MiniDFSNNTopology.NNConf(null)
.setHttpPort(nameNodeHttpPort)
.setIpcPort(nameNodePort)));
}
/**
* Set up an HA topology with a single HA nameservice.
*/
public static MiniDFSNNTopology simpleHATopology() {
return new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("minidfs-ns")
.addNN(new MiniDFSNNTopology.NNConf("nn1"))
.addNN(new MiniDFSNNTopology.NNConf("nn2")));
}
/**
* Set up federated cluster with the given number of nameservices, each
* of which has only a single NameNode.
*/
public static MiniDFSNNTopology simpleFederatedTopology(
int numNameservices) {
MiniDFSNNTopology topology = new MiniDFSNNTopology();
for (int i = 1; i <= numNameservices; i++) {
topology.addNameservice(new MiniDFSNNTopology.NSConf("ns" + i)
.addNN(new MiniDFSNNTopology.NNConf(null)));
}
topology.setFederation(true);
return topology;
}
/**
* Set up federated cluster with the given nameservices, each
* of which has only a single NameNode.
*/
public static MiniDFSNNTopology simpleFederatedTopology(String nameservicesIds) {
MiniDFSNNTopology topology = new MiniDFSNNTopology();
String nsIds[] = nameservicesIds.split(",");
for (String nsId : nsIds) {
topology.addNameservice(new MiniDFSNNTopology.NSConf(nsId)
.addNN(new MiniDFSNNTopology.NNConf(null)));
}
topology.setFederation(true);
return topology;
}
/**
* Set up federated cluster with the given number of nameservices, each
* of which has two NameNodes.
*/
public static MiniDFSNNTopology simpleHAFederatedTopology(
int numNameservices) {
MiniDFSNNTopology topology = new MiniDFSNNTopology();
for (int i = 0; i < numNameservices; i++) {
topology.addNameservice(new MiniDFSNNTopology.NSConf("ns" + i)
.addNN(new MiniDFSNNTopology.NNConf("nn0"))
.addNN(new MiniDFSNNTopology.NNConf("nn1")));
}
topology.setFederation(true);
return topology;
}
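// Usage sketch (the builder call is assumed from typical MiniDFSCluster usage rather
// than anything defined in this class):
//   MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
//   MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
//       .nnTopology(topology)
//       .numDataNodes(1)
//       .build();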
public MiniDFSNNTopology setFederation(boolean federation) {
this.federation = federation;
return this;
}
public MiniDFSNNTopology addNameservice(NSConf nameservice) {
Preconditions.checkArgument(!nameservice.getNNs().isEmpty(),
"Must have at least one NN in a nameservice");
this.nameservices.add(nameservice);
return this;
}
public int countNameNodes() {
int count = 0;
for (NSConf ns : nameservices) {
count += ns.nns.size();
}
return count;
}
public NNConf getOnlyNameNode() {
Preconditions.checkState(countNameNodes() == 1,
"must have exactly one NN!");
return nameservices.get(0).getNNs().get(0);
}
public boolean isFederated() {
return nameservices.size() > 1 || federation;
}
/**
* @return true if at least one of the nameservices
* in the topology has HA enabled.
*/
public boolean isHA() {
for (NSConf ns : nameservices) {
if (ns.getNNs().size() > 1) {
return true;
}
}
return false;
}
/**
* @return true if all of the NNs in the cluster have their HTTP
* port specified to be non-ephemeral.
*/
public boolean allHttpPortsSpecified() {
for (NSConf ns : nameservices) {
for (NNConf nn : ns.getNNs()) {
if (nn.getHttpPort() == 0) {
return false;
}
}
}
return true;
}
/**
* @return true if all of the NNs in the cluster have their IPC
* port specified to be non-ephemeral.
*/
public boolean allIpcPortsSpecified() {
for (NSConf ns : nameservices) {
for (NNConf nn : ns.getNNs()) {
if (nn.getIpcPort() == 0) {
return false;
}
}
}
return true;
}
public List<NSConf> getNameservices() {
return nameservices;
}
public static class NSConf {
private final String id;
private final List<NNConf> nns = Lists.newArrayList();
public NSConf(String id) {
this.id = id;
}
public NSConf addNN(NNConf nn) {
this.nns.add(nn);
return this;
}
public String getId() {
return id;
}
public List<NNConf> getNNs() {
return nns;
}
}
public static class NNConf {
private final String nnId;
private int httpPort;
private int ipcPort;
private String clusterId;
public NNConf(String nnId) {
this.nnId = nnId;
}
String getNnId() {
return nnId;
}
int getIpcPort() {
return ipcPort;
}
int getHttpPort() {
return httpPort;
}
String getClusterId() {
return clusterId;
}
public NNConf setHttpPort(int httpPort) {
this.httpPort = httpPort;
return this;
}
public NNConf setIpcPort(int ipcPort) {
this.ipcPort = ipcPort;
return this;
}
public NNConf setClusterId(String clusterId) {
this.clusterId = clusterId;
return this;
}
}
}
| 6,817 | 25.948617 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.io.IOUtils;
import org.apache.log4j.Level;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
/**
* This class tests the DFS positional read functionality in a single node
* mini-cluster.
*/
public class TestPread {
static final long seed = 0xDEADBEEFL;
static final int blockSize = 4096;
static final int numBlocksPerFile = 12;
static final int fileSize = numBlocksPerFile * blockSize;
boolean simulatedStorage;
boolean isHedgedRead;
@Before
public void setup() {
simulatedStorage = false;
isHedgedRead = false;
}
private void writeFile(FileSystem fileSys, Path name) throws IOException {
int replication = 3;// We need more than one replica to exercise the hedged reads.
// create an empty file first so that open and read-past-EOF behaviour can be checked
DataOutputStream stm = fileSys.create(name, true, 4096,
(short)replication, blockSize);
// test empty file open and read
stm.close();
FSDataInputStream in = fileSys.open(name);
byte[] buffer = new byte[fileSize];
in.readFully(0, buffer, 0, 0);
IOException res = null;
try { // read beyond the end of the file
in.readFully(0, buffer, 0, 1);
} catch (IOException e) {
// should throw an exception
res = e;
}
assertTrue("Error reading beyond file boundary.", res != null);
in.close();
assertTrue("Cannot delete file", fileSys.delete(name, true));
// now create the real file
DFSTestUtil.createFile(fileSys, name, fileSize, fileSize,
blockSize, (short) replication, seed);
}
private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
assertEquals(message+" byte "+(from+idx)+" differs. expected "+
expected[from+idx]+" actual "+actual[idx],
actual[idx], expected[from+idx]);
actual[idx] = 0;
}
}
private void doPread(FSDataInputStream stm, long position, byte[] buffer,
int offset, int length) throws IOException {
int nread = 0;
long totalRead = 0;
DFSInputStream dfstm = null;
if (stm.getWrappedStream() instanceof DFSInputStream) {
dfstm = (DFSInputStream) (stm.getWrappedStream());
totalRead = dfstm.getReadStatistics().getTotalBytesRead();
}
while (nread < length) {
int nbytes =
stm.read(position + nread, buffer, offset + nread, length - nread);
assertTrue("Error in pread", nbytes > 0);
nread += nbytes;
}
if (dfstm != null) {
if (isHedgedRead) {
assertTrue("Expected read statistic to be incremented", length <= dfstm
.getReadStatistics().getTotalBytesRead() - totalRead);
} else {
assertEquals("Expected read statistic to be incremented", length, dfstm
.getReadStatistics().getTotalBytesRead() - totalRead);
}
}
}
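// The positional read used above, stm.read(position, buffer, offset, length), does not
// advance the stream's file pointer, which is why pReadFile below can interleave
// preads with ordinary readFully calls on the same open stream.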
private void pReadFile(FileSystem fileSys, Path name) throws IOException {
FSDataInputStream stm = fileSys.open(name);
byte[] expected = new byte[fileSize];
if (simulatedStorage) {
assert fileSys instanceof DistributedFileSystem;
DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(name.toString(),
0, fileSize);
DFSTestUtil.fillExpectedBuf(lbs, expected);
} else {
Random rand = new Random(seed);
rand.nextBytes(expected);
}
// do a sanity check. Read first 4K bytes
byte[] actual = new byte[4096];
stm.readFully(actual);
checkAndEraseData(actual, 0, expected, "Read Sanity Test");
// now do a pread for the first 8K bytes
actual = new byte[8192];
doPread(stm, 0L, actual, 0, 8192);
checkAndEraseData(actual, 0, expected, "Pread Test 1");
// Now check to see if the normal read returns 4K-8K byte range
actual = new byte[4096];
stm.readFully(actual);
checkAndEraseData(actual, 4096, expected, "Pread Test 2");
// Now see if we can cross a single block boundary successfully
// read 4K bytes from blockSize - 2K offset
stm.readFully(blockSize - 2048, actual, 0, 4096);
checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 3");
// now see if we can cross two block boundaries successfully
// read blockSize + 4K bytes from blockSize - 2K offset
actual = new byte[blockSize + 4096];
stm.readFully(blockSize - 2048, actual);
checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 4");
// now see if we can cross two block boundaries that are not cached
// read blockSize + 4K bytes from 10*blockSize - 2K offset
actual = new byte[blockSize + 4096];
stm.readFully(10 * blockSize - 2048, actual);
checkAndEraseData(actual, (10 * blockSize - 2048), expected, "Pread Test 5");
// now check that even after all these preads, we can still read
// bytes 8K-12K
actual = new byte[4096];
stm.readFully(actual);
checkAndEraseData(actual, 8192, expected, "Pread Test 6");
// done
stm.close();
// check block location caching
stm = fileSys.open(name);
stm.readFully(1, actual, 0, 4096);
stm.readFully(4*blockSize, actual, 0, 4096);
stm.readFully(7*blockSize, actual, 0, 4096);
actual = new byte[3*4096];
stm.readFully(0*blockSize, actual, 0, 3*4096);
checkAndEraseData(actual, 0, expected, "Pread Test 7");
actual = new byte[8*4096];
stm.readFully(3*blockSize, actual, 0, 8*4096);
checkAndEraseData(actual, 3*blockSize, expected, "Pread Test 8");
// read the tail
stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize/2);
IOException res = null;
try { // read beyond the end of the file
stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize);
} catch (IOException e) {
// should throw an exception
res = e;
}
assertTrue("Error reading beyond file boundary.", res != null);
stm.close();
}
// test pread can survive datanode restarts
private void datanodeRestartTest(MiniDFSCluster cluster, FileSystem fileSys,
Path name) throws IOException {
// skip this test if using simulated storage since simulated blocks
// don't survive datanode restarts.
if (simulatedStorage) {
return;
}
int numBlocks = 1;
assertTrue(numBlocks <= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
byte[] expected = new byte[numBlocks * blockSize];
Random rand = new Random(seed);
rand.nextBytes(expected);
byte[] actual = new byte[numBlocks * blockSize];
FSDataInputStream stm = fileSys.open(name);
// read a block and get block locations cached as a result
stm.readFully(0, actual);
checkAndEraseData(actual, 0, expected, "Pread Datanode Restart Setup");
// restart all datanodes. it is expected that they will
// restart on different ports, hence, cached block locations
// will no longer work.
assertTrue(cluster.restartDataNodes());
cluster.waitActive();
// verify the block can be read again using the same InputStream
// (via re-fetching of block locations from namenode). there is a
// 3 sec sleep in chooseDataNode(), which can be shortened for
// this test if configurable.
stm.readFully(0, actual);
checkAndEraseData(actual, 0, expected, "Pread Datanode Restart Test");
}
private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
assertTrue(fileSys.exists(name));
assertTrue(fileSys.delete(name, true));
assertTrue(!fileSys.exists(name));
}
private Callable<Void> getPReadFileCallable(final FileSystem fileSys,
final Path file) {
return new Callable<Void>() {
public Void call() throws IOException {
pReadFile(fileSys, file);
return null;
}
};
}
/**
* Tests positional read in DFS.
*/
@Test
public void testPreadDFS() throws IOException {
Configuration conf = new Configuration();
dfsPreadTest(conf, false, true); // normal pread
dfsPreadTest(conf, true, true); // trigger read code path without
// transferTo.
}
@Test
public void testPreadDFSNoChecksum() throws IOException {
Configuration conf = new Configuration();
((Log4JLogger)DataTransferProtocol.LOG).getLogger().setLevel(Level.ALL);
dfsPreadTest(conf, false, false);
dfsPreadTest(conf, true, false);
}
/**
* Tests positional read in DFS, with hedged reads enabled.
*/
@Test
public void testHedgedPreadDFSBasic() throws IOException {
isHedgedRead = true;
Configuration conf = new Configuration();
conf.setInt(HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY, 5);
conf.setLong(HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_KEY, 1);
dfsPreadTest(conf, false, true); // normal pread
dfsPreadTest(conf, true, true); // trigger read code path without
// transferTo.
}
@Test
public void testHedgedReadLoopTooManyTimes() throws IOException {
Configuration conf = new Configuration();
int numHedgedReadPoolThreads = 5;
final int hedgedReadTimeoutMillis = 50;
conf.setInt(HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY,
numHedgedReadPoolThreads);
conf.setLong(HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_KEY,
hedgedReadTimeoutMillis);
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 0);
// Set up the InjectionHandler
DFSClientFaultInjector.instance = Mockito
.mock(DFSClientFaultInjector.class);
DFSClientFaultInjector injector = DFSClientFaultInjector.instance;
final int sleepMs = 100;
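// Fault injection sketch: fetchFromDatanodeException() sleeps past the hedge threshold
// and then throws a ChecksumException exactly once, while readFromDatanodeDelay()
// slows every datanode read, so the hedged-read loop is forced to retry.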
Mockito.doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
if (true) {
Thread.sleep(hedgedReadTimeoutMillis + sleepMs);
if (DFSClientFaultInjector.exceptionNum.compareAndSet(0, 1)) {
System.out.println("-------------- throw Checksum Exception");
throw new ChecksumException("ChecksumException test", 100);
}
}
return null;
}
}).when(injector).fetchFromDatanodeException();
Mockito.doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
if (true) {
Thread.sleep(sleepMs * 2);
}
return null;
}
}).when(injector).readFromDatanodeDelay();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
.format(true).build();
DistributedFileSystem fileSys = cluster.getFileSystem();
DFSClient dfsClient = fileSys.getClient();
FSDataOutputStream output = null;
DFSInputStream input = null;
String filename = "/hedgedReadMaxOut.dat";
try {
Path file = new Path(filename);
output = fileSys.create(file, (short) 2);
byte[] data = new byte[64 * 1024];
output.write(data);
output.flush();
output.write(data);
output.flush();
output.write(data);
output.flush();
output.close();
byte[] buffer = new byte[64 * 1024];
input = dfsClient.open(filename);
input.read(0, buffer, 0, 1024);
input.close();
assertEquals(3, input.getHedgedReadOpsLoopNumForTesting());
} catch (BlockMissingException e) {
assertTrue(false);
} finally {
Mockito.reset(injector);
IOUtils.cleanup(null, input);
IOUtils.cleanup(null, output);
fileSys.close();
cluster.shutdown();
}
}
@Test
public void testMaxOutHedgedReadPool() throws IOException,
InterruptedException, ExecutionException {
isHedgedRead = true;
Configuration conf = new Configuration();
int numHedgedReadPoolThreads = 5;
final int initialHedgedReadTimeoutMillis = 50000;
final int fixedSleepIntervalMillis = 50;
conf.setInt(HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY,
numHedgedReadPoolThreads);
conf.setLong(HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_KEY,
initialHedgedReadTimeoutMillis);
// Set up the InjectionHandler
DFSClientFaultInjector.instance = Mockito
.mock(DFSClientFaultInjector.class);
DFSClientFaultInjector injector = DFSClientFaultInjector.instance;
// make preads sleep for 50ms
Mockito.doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
Thread.sleep(fixedSleepIntervalMillis);
return null;
}
}).when(injector).startFetchFromDatanode();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
.format(true).build();
DistributedFileSystem fileSys = cluster.getFileSystem();
DFSClient dfsClient = fileSys.getClient();
DFSHedgedReadMetrics metrics = dfsClient.getHedgedReadMetrics();
// Metrics instance is static, so we need to reset counts from prior tests.
metrics.hedgedReadOps.set(0);
metrics.hedgedReadOpsWin.set(0);
metrics.hedgedReadOpsInCurThread.set(0);
try {
Path file1 = new Path("hedgedReadMaxOut.dat");
writeFile(fileSys, file1);
// Basic test. Reads complete within timeout. Assert that there were no
// hedged reads.
pReadFile(fileSys, file1);
// assert that there were no hedged reads: each read sleeps ~50ms, far below the 50s threshold
assertTrue(metrics.getHedgedReadOps() == 0);
assertTrue(metrics.getHedgedReadOpsInCurThread() == 0);
/*
* Reads take longer than timeout. But, only one thread reading. Assert
* that there were hedged reads. But, none of the reads had to run in the
* current thread.
*/
{
Configuration conf2 = new Configuration(cluster.getConfiguration(0));
conf2.setBoolean("fs.hdfs.impl.disable.cache", true);
conf2.setLong(HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_KEY, 50);
fileSys.close();
fileSys = (DistributedFileSystem)FileSystem.get(cluster.getURI(0), conf2);
metrics = fileSys.getClient().getHedgedReadMetrics();
}
pReadFile(fileSys, file1);
// assert that there were hedged reads
assertTrue(metrics.getHedgedReadOps() > 0);
assertTrue(metrics.getHedgedReadOpsInCurThread() == 0);
/*
* Multiple threads reading. Reads take longer than timeout. Assert that
* there were hedged reads. And that reads had to run in the current
* thread.
*/
int factor = 10;
int numHedgedReads = numHedgedReadPoolThreads * factor;
long initialReadOpsValue = metrics.getHedgedReadOps();
ExecutorService executor = Executors.newFixedThreadPool(numHedgedReads);
ArrayList<Future<Void>> futures = new ArrayList<Future<Void>>();
for (int i = 0; i < numHedgedReads; i++) {
futures.add(executor.submit(getPReadFileCallable(fileSys, file1)));
}
for (int i = 0; i < numHedgedReads; i++) {
futures.get(i).get();
}
assertTrue(metrics.getHedgedReadOps() > initialReadOpsValue);
assertTrue(metrics.getHedgedReadOpsInCurThread() > 0);
cleanupFile(fileSys, file1);
executor.shutdown();
} finally {
fileSys.close();
cluster.shutdown();
Mockito.reset(injector);
}
}
private void dfsPreadTest(Configuration conf, boolean disableTransferTo, boolean verifyChecksum)
throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
conf.setLong(HdfsClientConfigKeys.Read.PREFETCH_SIZE_KEY, 4096);
// Set short retry timeouts so this test runs faster
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 0);
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
if (disableTransferTo) {
conf.setBoolean("dfs.datanode.transferTo.allowed", false);
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fileSys = cluster.getFileSystem();
fileSys.setVerifyChecksum(verifyChecksum);
try {
Path file1 = new Path("/preadtest.dat");
writeFile(fileSys, file1);
pReadFile(fileSys, file1);
datanodeRestartTest(cluster, fileSys, file1);
cleanupFile(fileSys, file1);
} finally {
fileSys.close();
cluster.shutdown();
}
}
@Test
public void testPreadDFSSimulated() throws IOException {
simulatedStorage = true;
testPreadDFS();
}
/**
* Tests positional read in LocalFS.
*/
@Test
public void testPreadLocalFS() throws IOException {
Configuration conf = new HdfsConfiguration();
FileSystem fileSys = FileSystem.getLocal(conf);
try {
Path file1 = new Path("build/test/data", "preadtest.dat");
writeFile(fileSys, file1);
pReadFile(fileSys, file1);
cleanupFile(fileSys, file1);
} finally {
fileSys.close();
}
}
public static void main(String[] args) throws Exception {
new TestPread().testPreadDFS();
}
}
| 18,997 | 36.769384 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.io.IOUtils;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
/**
* A JUnit test for corrupted file handling.
* This test creates a bunch of files/directories with replication
* factor of 2. Then verifies that a client can automatically
 * access the remaining valid replica in spite of the following
 * types of simulated errors:
 *
 *  1. Deletes the meta file on one replica
 *  2. Truncates the meta file on one replica
 *  3. Corrupts the meta file header on one replica
 *  4. Corrupts a random offset and portion of the meta file
 *  5. Swaps two meta files, i.e. the format of the meta files
 *     is valid but their CRCs do not match their corresponding
 *     data blocks
* The above tests are run for varied values of dfs.bytes-per-checksum
* and dfs.blocksize. It tests for the case when the meta file is
* multiple blocks.
*
* Another portion of the test is commented out till HADOOP-1557
* is addressed:
* 1. Create file with 2 replica, corrupt the meta file of replica,
* decrease replication factor from 2 to 1. Validate that the
* remaining replica is the good one.
* 2. Create file with 2 replica, corrupt the meta file of one replica,
* increase replication factor of file to 3. verify that the new
* replica was created from the non-corrupted replica.
*/
public class TestCrcCorruption {
private DFSClientFaultInjector faultInjector;
@Before
public void setUp() throws IOException {
faultInjector = Mockito.mock(DFSClientFaultInjector.class);
DFSClientFaultInjector.instance = faultInjector;
}
/**
* Test case for data corruption during data transmission for
* create/write. To recover from corruption while writing, at
* least two replicas are needed.
*/
@Test(timeout=50000)
public void testCorruptionDuringWrt() throws Exception {
Configuration conf = new HdfsConfiguration();
// Set short retry timeouts so this test runs faster
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(10).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
Path file = new Path("/test_corruption_file");
FSDataOutputStream out = fs.create(file, true, 8192, (short)3, (long)(128*1024*1024));
byte[] data = new byte[65536];
for (int i=0; i < 65536; i++) {
data[i] = (byte)(i % 256);
}
for (int i = 0; i < 5; i++) {
out.write(data, 0, 65535);
}
out.hflush();
// corrupt the packet once
Mockito.when(faultInjector.corruptPacket()).thenReturn(true, false);
Mockito.when(faultInjector.uncorruptPacket()).thenReturn(true, false);
for (int i = 0; i < 5; i++) {
out.write(data, 0, 65535);
}
out.close();
// read should succeed
FSDataInputStream in = fs.open(file);
for(int c; (c = in.read()) != -1; );
in.close();
// test the retry limit
out = fs.create(file, true, 8192, (short)3, (long)(128*1024*1024));
// corrupt the packet once and never fix it.
Mockito.when(faultInjector.corruptPacket()).thenReturn(true, false);
Mockito.when(faultInjector.uncorruptPacket()).thenReturn(false);
// the client should give up pipeline reconstruction after retries.
try {
for (int i = 0; i < 5; i++) {
out.write(data, 0, 65535);
}
out.close();
fail("Write did not fail");
} catch (IOException ioe) {
// we should get an ioe
DFSClient.LOG.info("Got expected exception", ioe);
}
} finally {
if (cluster != null) { cluster.shutdown(); }
Mockito.when(faultInjector.corruptPacket()).thenReturn(false);
Mockito.when(faultInjector.uncorruptPacket()).thenReturn(false);
}
}
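  // Illustrative sketch (not used by the tests in this class): a client that
  // wants to salvage whatever bytes remain in a file with corrupt checksums
  // can ask the FileSystem to skip client-side CRC verification. This only
  // demonstrates the public FileSystem API; it is not part of the recovery
  // behaviour these tests assert.
  private static byte[] readIgnoringChecksums(FileSystem fs, Path p, int len)
      throws IOException {
    fs.setVerifyChecksum(false);      // disable client-side CRC verification
    FSDataInputStream in = fs.open(p);
    try {
      byte[] buf = new byte[len];
      in.readFully(0, buf);           // read without validating checksums
      return buf;
    } finally {
      in.close();
      fs.setVerifyChecksum(true);     // restore the default behaviour
    }
  }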
/**
* check if DFS can handle corrupted CRC blocks
*/
private void thistest(Configuration conf, DFSTestUtil util) throws Exception {
MiniDFSCluster cluster = null;
int numDataNodes = 2;
short replFactor = 2;
Random random = new Random();
// Set short retry timeouts so this test runs faster
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
util.createFiles(fs, "/srcdat", replFactor);
util.waitReplication(fs, "/srcdat", (short)2);
      // Now deliberately remove/truncate meta files in one storage
      // directory of the first datanode. The complete absence of a meta
      // file prevents this datanode from sending data to another datanode.
      // However, a client is still allowed access to this block.
//
File storageDir = cluster.getInstanceStorageDir(0, 1);
String bpid = cluster.getNamesystem().getBlockPoolId();
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
assertTrue("data directory does not exist", data_dir.exists());
File[] blocks = data_dir.listFiles();
assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
int num = 0;
for (int idx = 0; idx < blocks.length; idx++) {
if (blocks[idx].getName().startsWith(Block.BLOCK_FILE_PREFIX) &&
blocks[idx].getName().endsWith(".meta")) {
num++;
if (num % 3 == 0) {
//
// remove .meta file
//
System.out.println("Deliberately removing file " + blocks[idx].getName());
assertTrue("Cannot remove file.", blocks[idx].delete());
} else if (num % 3 == 1) {
//
// shorten .meta file
//
RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
FileChannel channel = file.getChannel();
int newsize = random.nextInt((int)channel.size()/2);
System.out.println("Deliberately truncating file " +
blocks[idx].getName() +
" to size " + newsize + " bytes.");
channel.truncate(newsize);
file.close();
} else {
//
// corrupt a few bytes of the metafile
//
RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
FileChannel channel = file.getChannel();
long position = 0;
//
// The very first time, corrupt the meta header at offset 0
//
if (num != 2) {
position = (long)random.nextInt((int)channel.size());
}
int length = random.nextInt((int)(channel.size() - position + 1));
byte[] buffer = new byte[length];
random.nextBytes(buffer);
channel.write(ByteBuffer.wrap(buffer), position);
System.out.println("Deliberately corrupting file " +
blocks[idx].getName() +
" at offset " + position +
" length " + length);
file.close();
}
}
}
//
// Now deliberately corrupt all meta blocks from the second
// directory of the first datanode
//
storageDir = cluster.getInstanceStorageDir(0, 1);
data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
assertTrue("data directory does not exist", data_dir.exists());
blocks = data_dir.listFiles();
assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
int count = 0;
File previous = null;
for (int idx = 0; idx < blocks.length; idx++) {
if (blocks[idx].getName().startsWith("blk_") &&
blocks[idx].getName().endsWith(".meta")) {
//
// Move the previous metafile into the current one.
//
count++;
if (count % 2 == 0) {
            System.out.println("Deliberately inserting bad crc into files " +
blocks[idx].getName() + " " + previous.getName());
assertTrue("Cannot remove file.", blocks[idx].delete());
assertTrue("Cannot corrupt meta file.", previous.renameTo(blocks[idx]));
assertTrue("Cannot recreate empty meta file.", previous.createNewFile());
previous = null;
} else {
previous = blocks[idx];
}
}
}
//
// Only one replica is possibly corrupted. The other replica should still
// be good. Verify.
//
assertTrue("Corrupted replicas not handled properly.",
util.checkFiles(fs, "/srcdat"));
System.out.println("All File still have a valid replica");
//
// set replication factor back to 1. This causes only one replica of
// of each block to remain in HDFS. The check is to make sure that
// the corrupted replica generated above is the one that gets deleted.
// This test is currently disabled until HADOOP-1557 is solved.
//
util.setReplication(fs, "/srcdat", (short)1);
//util.waitReplication(fs, "/srcdat", (short)1);
//System.out.println("All Files done with removing replicas");
//assertTrue("Excess replicas deleted. Corrupted replicas found.",
// util.checkFiles(fs, "/srcdat"));
      System.out.println("The excess-corrupted-replica test is disabled " +
                         "pending HADOOP-1557");
util.cleanup(fs, "/srcdat");
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
@Test
public void testCrcCorruption() throws Exception {
//
// default parameters
//
System.out.println("TestCrcCorruption with default parameters");
Configuration conf1 = new HdfsConfiguration();
conf1.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000);
DFSTestUtil util1 = new DFSTestUtil.Builder().setName("TestCrcCorruption").
setNumFiles(40).build();
thistest(conf1, util1);
//
// specific parameters
//
System.out.println("TestCrcCorruption with specific parameters");
Configuration conf2 = new HdfsConfiguration();
conf2.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 17);
conf2.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 34);
DFSTestUtil util2 = new DFSTestUtil.Builder().setName("TestCrcCorruption").
setNumFiles(40).setMaxSize(400).build();
thistest(conf2, util2);
}
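  // Note on the "specific parameters" above (a sketch of the client-side
  // constraint, assumed from the write path rather than asserted by this
  // test): the block size must be a multiple of dfs.bytes-per-checksum,
  // which is why 17 is paired with 34 (= 2 x 17). A more conventional
  // configuration would look like:
  //
  //   conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
  //   conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 128 * 1024 * 1024);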
/**
* Make a single-DN cluster, corrupt a block, and make sure
* there's no infinite loop, but rather it eventually
* reports the exception to the client.
*/
@Test(timeout=300000) // 5 min timeout
public void testEntirelyCorruptFileOneNode() throws Exception {
doTestEntirelyCorruptFile(1);
}
/**
* Same thing with multiple datanodes - in history, this has
* behaved differently than the above.
*
* This test usually completes in around 15 seconds - if it
* times out, this suggests that the client is retrying
* indefinitely.
*/
@Test(timeout=300000) // 5 min timeout
public void testEntirelyCorruptFileThreeNodes() throws Exception {
doTestEntirelyCorruptFile(3);
}
private void doTestEntirelyCorruptFile(int numDataNodes) throws Exception {
long fileSize = 4096;
Path file = new Path("/testFile");
short replFactor = (short)numDataNodes;
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
// Set short retry timeouts so this test runs faster
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
try {
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, file, fileSize, replFactor, 12345L /*seed*/);
DFSTestUtil.waitReplication(fs, file, replFactor);
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
assertEquals("All replicas not corrupted", replFactor, blockFilesCorrupted);
try {
IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(), conf,
true);
fail("Didn't get exception");
} catch (IOException ioe) {
DFSClient.LOG.info("Got expected exception", ioe);
}
} finally {
cluster.shutdown();
}
}
}
| 14,163 | 37.594005 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListPathServlet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.server.namenode.ListPathsServlet;
import org.apache.hadoop.hdfs.web.HftpFileSystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Test for {@link ListPathsServlet} that serves the URL
 * http://<namenodeaddress>:<httpport>/listPaths
*
* This test does not use the servlet directly. Instead it is based on
* {@link HftpFileSystem}, which uses this servlet to implement
 * the {@link HftpFileSystem#listStatus(Path)} method.
*/
public class TestListPathServlet {
private static final Configuration CONF = new HdfsConfiguration();
private static MiniDFSCluster cluster;
private static FileSystem fs;
private static URI hftpURI;
private static HftpFileSystem hftpFs;
private final Random r = new Random();
private final List<String> filelist = new ArrayList<String>();
@BeforeClass
public static void setup() throws Exception {
// start a cluster with single datanode
cluster = new MiniDFSCluster.Builder(CONF).build();
cluster.waitActive();
fs = cluster.getFileSystem();
final String str = "hftp://"
+ CONF.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
hftpURI = new URI(str);
hftpFs = cluster.getHftpFileSystem(0);
}
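  // Illustrative sketch (not called by the tests): listing a directory over
  // hftp is just the ordinary FileSystem API against an hftp:// URI; on the
  // namenode side the request is served by ListPathsServlet.
  private static FileStatus[] listOverHftp(URI uri, Configuration conf, String dir)
      throws IOException {
    FileSystem hftp = FileSystem.get(uri, conf); // e.g. hftp://namenode:50070
    return hftp.listStatus(new Path(dir));
  }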
@AfterClass
public static void teardown() {
cluster.shutdown();
}
/** create a file with a length of <code>fileLen</code> */
private void createFile(String fileName, long fileLen) throws IOException {
filelist.add(hftpURI + fileName);
final Path filePath = new Path(fileName);
DFSTestUtil.createFile(fs, filePath, fileLen, (short) 1, r.nextLong());
}
private void mkdirs(String dirName) throws IOException {
filelist.add(hftpURI + dirName);
fs.mkdirs(new Path(dirName));
}
@Test
public void testListStatus() throws Exception {
// Empty root directory
checkStatus("/");
// Root directory with files and directories
createFile("/a", 1);
createFile("/b", 1);
mkdirs("/dir");
checkFile(new Path("/a"));
checkFile(new Path("/b"));
checkStatus("/");
// A directory with files and directories
createFile("/dir/.a.crc", 1);
createFile("/dir/b", 1);
mkdirs("/dir/dir1");
checkFile(new Path("/dir/.a.crc"));
checkFile(new Path("/dir/b"));
checkStatus("/dir");
// Non existent path
checkStatus("/nonexistent");
checkStatus("/nonexistent/a");
final String username = UserGroupInformation.getCurrentUser().getShortUserName() + "1";
final HftpFileSystem hftp2 = cluster.getHftpFileSystemAs(username, CONF, 0, "somegroup");
{ //test file not found on hftp
final Path nonexistent = new Path("/nonexistent");
try {
hftp2.getFileStatus(nonexistent);
Assert.fail();
} catch(IOException ioe) {
FileSystem.LOG.info("GOOD: getting an exception", ioe);
}
}
{ //test permission error on hftp
final Path dir = new Path("/dir");
fs.setPermission(dir, new FsPermission((short)0));
try {
hftp2.getFileStatus(new Path(dir, "a"));
Assert.fail();
} catch(IOException ioe) {
FileSystem.LOG.info("GOOD: getting an exception", ioe);
}
}
}
private void checkStatus(String listdir) throws IOException {
final Path listpath = hftpFs.makeQualified(new Path(listdir));
listdir = listpath.toString();
final FileStatus[] statuslist = hftpFs.listStatus(listpath);
for (String directory : filelist) {
System.out.println("dir:" + directory);
}
for (String file : filelist) {
System.out.println("file:" + file);
}
for (FileStatus status : statuslist) {
System.out.println("status:" + status.getPath().toString() + " type "
+ (status.isDirectory() ? "directory"
: ( status.isFile() ? "file" : "symlink")));
}
for (String file : filelist) {
boolean found = false;
// Consider only file under the list path
if (!file.startsWith(listpath.toString()) ||
file.equals(listpath.toString())) {
continue;
}
for (FileStatus status : statuslist) {
if (status.getPath().toString().equals(file)) {
found = true;
break;
}
}
Assert.assertTrue("Directory/file not returned in list status " + file,
found);
}
}
private void checkFile(final Path f) throws IOException {
final Path hdfspath = fs.makeQualified(f);
final FileStatus hdfsstatus = fs.getFileStatus(hdfspath);
FileSystem.LOG.info("hdfspath=" + hdfspath);
final Path hftppath = hftpFs.makeQualified(f);
final FileStatus hftpstatus = hftpFs.getFileStatus(hftppath);
FileSystem.LOG.info("hftppath=" + hftppath);
Assert.assertEquals(hdfspath.toUri().getPath(),
hdfsstatus.getPath().toUri().getPath());
checkFileStatus(hdfsstatus, hftpstatus);
}
private static void checkFileStatus(final FileStatus expected,
final FileStatus computed) {
Assert.assertEquals(expected.getPath().toUri().getPath(),
computed.getPath().toUri().getPath());
// TODO: test will fail if the following is un-commented.
// Assert.assertEquals(expected.getAccessTime(), computed.getAccessTime());
// Assert.assertEquals(expected.getModificationTime(),
// computed.getModificationTime());
Assert.assertEquals(expected.getBlockSize(), computed.getBlockSize());
Assert.assertEquals(expected.getGroup(), computed.getGroup());
Assert.assertEquals(expected.getLen(), computed.getLen());
Assert.assertEquals(expected.getOwner(), computed.getOwner());
Assert.assertEquals(expected.getPermission(), computed.getPermission());
Assert.assertEquals(expected.getReplication(), computed.getReplication());
}
}
| 7,065 | 33.980198 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.anyShort;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;
import java.io.DataOutputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
public class TestLease {
static boolean hasLease(MiniDFSCluster cluster, Path src) {
return NameNodeAdapter.getLeaseForPath(cluster.getNameNode(),
src.toString()) != null;
}
static int leaseCount(MiniDFSCluster cluster) {
return NameNodeAdapter.getLeaseManager(cluster.getNamesystem()).countLease();
}
static final String dirString = "/test/lease";
final Path dir = new Path(dirString);
static final Log LOG = LogFactory.getLog(TestLease.class);
final Configuration conf = new HdfsConfiguration();
@Test
public void testLeaseAbort() throws Exception {
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
cluster.waitActive();
NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
NamenodeProtocols spyNN = spy(preSpyNN);
DFSClient dfs = new DFSClient(null, spyNN, conf, null);
byte[] buf = new byte[1024];
FSDataOutputStream c_out = createFsOut(dfs, dirString + "c");
c_out.write(buf, 0, 1024);
c_out.close();
DFSInputStream c_in = dfs.open(dirString + "c");
FSDataOutputStream d_out = createFsOut(dfs, dirString + "d");
// stub the renew method.
doThrow(new RemoteException(InvalidToken.class.getName(),
"Your token is worthless")).when(spyNN).renewLease(anyString());
      // We don't need to wait for the lease renewer thread to act.
// call renewLease() manually.
// make it look like the soft limit has been exceeded.
LeaseRenewer originalRenewer = dfs.getLeaseRenewer();
dfs.lastLeaseRenewal = Time.monotonicNow()
- HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD - 1000;
try {
dfs.renewLease();
} catch (IOException e) {}
      // Things should continue to work until it passes the hard limit
      // without renewing.
try {
d_out.write(buf, 0, 1024);
LOG.info("Write worked beyond the soft limit as expected.");
} catch (IOException e) {
Assert.fail("Write failed.");
}
// make it look like the hard limit has been exceeded.
dfs.lastLeaseRenewal = Time.monotonicNow()
- HdfsServerConstants.LEASE_HARDLIMIT_PERIOD - 1000;
dfs.renewLease();
// this should not work.
try {
d_out.write(buf, 0, 1024);
d_out.close();
Assert.fail("Write did not fail even after the fatal lease renewal failure");
} catch (IOException e) {
LOG.info("Write failed as expected. ", e);
}
// If aborted, the renewer should be empty. (no reference to clients)
Thread.sleep(1000);
Assert.assertTrue(originalRenewer.isEmpty());
// unstub
doNothing().when(spyNN).renewLease(anyString());
// existing input streams should work
try {
int num = c_in.read(buf, 0, 1);
if (num != 1) {
Assert.fail("Failed to read 1 byte");
}
c_in.close();
} catch (IOException e) {
LOG.error("Read failed with ", e);
Assert.fail("Read after lease renewal failure failed");
}
// new file writes should work.
try {
c_out = createFsOut(dfs, dirString + "c");
c_out.write(buf, 0, 1024);
c_out.close();
} catch (IOException e) {
LOG.error("Write failed with ", e);
Assert.fail("Write failed");
}
} finally {
cluster.shutdown();
}
}
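  // A brief sketch of the two lease limits manipulated above (the values are
  // the usual HdfsServerConstants defaults and are stated here as an
  // assumption, not something this test verifies): the soft limit is the
  // grace period after which another writer may preempt the lease, and the
  // hard limit is when the namenode itself triggers lease recovery.
  //
  //   HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD  // 60 * 1000 ms    (1 minute)
  //   HdfsServerConstants.LEASE_HARDLIMIT_PERIOD  // 60 * 60 * 1000 ms (1 hour)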
@Test
public void testLeaseAfterRename() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
Path p = new Path("/test-file");
Path d = new Path("/test-d");
Path d2 = new Path("/test-d-other");
// open a file to get a lease
FileSystem fs = cluster.getFileSystem();
FSDataOutputStream out = fs.create(p);
out.writeBytes("something");
//out.hsync();
Assert.assertTrue(hasLease(cluster, p));
Assert.assertEquals(1, leaseCount(cluster));
// just to ensure first fs doesn't have any logic to twiddle leases
DistributedFileSystem fs2 = (DistributedFileSystem) FileSystem.newInstance(fs.getUri(), fs.getConf());
// rename the file into an existing dir
LOG.info("DMS: rename file into dir");
Path pRenamed = new Path(d, p.getName());
fs2.mkdirs(d);
fs2.rename(p, pRenamed);
Assert.assertFalse(p+" exists", fs2.exists(p));
Assert.assertTrue(pRenamed+" not found", fs2.exists(pRenamed));
Assert.assertFalse("has lease for "+p, hasLease(cluster, p));
Assert.assertTrue("no lease for "+pRenamed, hasLease(cluster, pRenamed));
Assert.assertEquals(1, leaseCount(cluster));
// rename the parent dir to a new non-existent dir
LOG.info("DMS: rename parent dir");
Path pRenamedAgain = new Path(d2, pRenamed.getName());
fs2.rename(d, d2);
// src gone
Assert.assertFalse(d+" exists", fs2.exists(d));
Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
// dst checks
Assert.assertTrue(d2+" not found", fs2.exists(d2));
Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
Assert.assertEquals(1, leaseCount(cluster));
// rename the parent dir to existing dir
// NOTE: rename w/o options moves paths into existing dir
LOG.info("DMS: rename parent again");
pRenamed = pRenamedAgain;
pRenamedAgain = new Path(new Path(d, d2.getName()), p.getName());
fs2.mkdirs(d);
fs2.rename(d2, d);
// src gone
Assert.assertFalse(d2+" exists", fs2.exists(d2));
      Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
// dst checks
Assert.assertTrue(d+" not found", fs2.exists(d));
Assert.assertTrue(pRenamedAgain +" not found", fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
Assert.assertEquals(1, leaseCount(cluster));
// rename with opts to non-existent dir
pRenamed = pRenamedAgain;
pRenamedAgain = new Path(d2, p.getName());
fs2.rename(pRenamed.getParent(), d2, Options.Rename.OVERWRITE);
// src gone
      Assert.assertFalse(pRenamed.getParent() +" exists", fs2.exists(pRenamed.getParent()));
Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
// dst checks
Assert.assertTrue(d2+" not found", fs2.exists(d2));
Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
Assert.assertEquals(1, leaseCount(cluster));
// rename with opts to existing dir
// NOTE: rename with options will not move paths into the existing dir
pRenamed = pRenamedAgain;
pRenamedAgain = new Path(d, p.getName());
fs2.rename(pRenamed.getParent(), d, Options.Rename.OVERWRITE);
// src gone
      Assert.assertFalse(pRenamed.getParent() +" exists", fs2.exists(pRenamed.getParent()));
Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
// dst checks
Assert.assertTrue(d+" not found", fs2.exists(d));
Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
Assert.assertEquals(1, leaseCount(cluster));
out.close();
} finally {
cluster.shutdown();
}
}
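  // Illustrative sketch (not used by the tests) of the two rename flavours the
  // NOTEs above refer to: the boolean rename() moves src *into* dst when dst
  // is an existing directory, while the Options.Rename variant treats dst as
  // the exact destination path and fails rather than nesting, overwriting dst
  // only when OVERWRITE is passed.
  private static void renameFlavours(DistributedFileSystem dfs, Path src,
      Path dst, boolean useOptions) throws IOException {
    if (useOptions) {
      dfs.rename(src, dst, Options.Rename.OVERWRITE); // dst is the final path
    } else {
      boolean renamed = dfs.rename(src, dst); // may nest src under existing dst
      Assert.assertTrue("rename failed", renamed);
    }
  }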
/**
* Test that we can open up a file for write, move it to another location,
* and then create a new file in the previous location, without causing any
* lease conflicts. This is possible because we now use unique inode IDs
* to identify files to the NameNode.
*/
@Test
public void testLeaseAfterRenameAndRecreate() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
final Path path1 = new Path("/test-file");
final String contents1 = "contents1";
final Path path2 = new Path("/test-file-new-location");
final String contents2 = "contents2";
// open a file to get a lease
FileSystem fs = cluster.getFileSystem();
FSDataOutputStream out1 = fs.create(path1);
out1.writeBytes(contents1);
Assert.assertTrue(hasLease(cluster, path1));
Assert.assertEquals(1, leaseCount(cluster));
DistributedFileSystem fs2 = (DistributedFileSystem)
FileSystem.newInstance(fs.getUri(), fs.getConf());
fs2.rename(path1, path2);
FSDataOutputStream out2 = fs2.create(path1);
out2.writeBytes(contents2);
out2.close();
// The first file should still be open and valid
Assert.assertTrue(hasLease(cluster, path2));
out1.close();
// Contents should be as expected
DistributedFileSystem fs3 = (DistributedFileSystem)
FileSystem.newInstance(fs.getUri(), fs.getConf());
Assert.assertEquals(contents1, DFSTestUtil.readFile(fs3, path2));
Assert.assertEquals(contents2, DFSTestUtil.readFile(fs3, path1));
} finally {
cluster.shutdown();
}
}
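  // Sketch of the lookup the comment above alludes to (illustrative only, and
  // the accessor chain below is an assumption about this branch rather than
  // something the test exercises): leases are tracked against the file's
  // inode id, so renaming the path does not disturb the open file's lease.
  //
  //   long inodeId = dfs.getClient().getFileInfo("/test-file").getFileId();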
@Test
public void testLease() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
FileSystem fs = cluster.getFileSystem();
Assert.assertTrue(fs.mkdirs(dir));
Path a = new Path(dir, "a");
Path b = new Path(dir, "b");
DataOutputStream a_out = fs.create(a);
a_out.writeBytes("something");
Assert.assertTrue(hasLease(cluster, a));
Assert.assertTrue(!hasLease(cluster, b));
DataOutputStream b_out = fs.create(b);
b_out.writeBytes("something");
Assert.assertTrue(hasLease(cluster, a));
Assert.assertTrue(hasLease(cluster, b));
a_out.close();
b_out.close();
Assert.assertTrue(!hasLease(cluster, a));
Assert.assertTrue(!hasLease(cluster, b));
Path fileA = new Path(dir, "fileA");
FSDataOutputStream fileA_out = fs.create(fileA);
fileA_out.writeBytes("something");
Assert.assertTrue("Failed to get the lease!", hasLease(cluster, fileA));
fs.delete(dir, true);
try {
fileA_out.hflush();
Assert.fail("Should validate file existence!");
} catch (FileNotFoundException e) {
// expected
GenericTestUtils.assertExceptionContains("File does not exist", e);
}
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
@SuppressWarnings("unchecked")
@Test
public void testFactory() throws Exception {
final String[] groups = new String[]{"supergroup"};
final UserGroupInformation[] ugi = new UserGroupInformation[3];
for(int i = 0; i < ugi.length; i++) {
ugi[i] = UserGroupInformation.createUserForTesting("user" + i, groups);
}
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0)).when(mcp).getFileInfo(anyString());
Mockito
.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0))
.when(mcp)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
final Configuration conf = new Configuration();
final DFSClient c1 = createDFSClientAs(ugi[0], conf);
FSDataOutputStream out1 = createFsOut(c1, "/out1");
final DFSClient c2 = createDFSClientAs(ugi[0], conf);
FSDataOutputStream out2 = createFsOut(c2, "/out2");
Assert.assertEquals(c1.getLeaseRenewer(), c2.getLeaseRenewer());
final DFSClient c3 = createDFSClientAs(ugi[1], conf);
FSDataOutputStream out3 = createFsOut(c3, "/out3");
Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
final DFSClient c4 = createDFSClientAs(ugi[1], conf);
FSDataOutputStream out4 = createFsOut(c4, "/out4");
Assert.assertEquals(c3.getLeaseRenewer(), c4.getLeaseRenewer());
final DFSClient c5 = createDFSClientAs(ugi[2], conf);
FSDataOutputStream out5 = createFsOut(c5, "/out5");
Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
}
private FSDataOutputStream createFsOut(DFSClient dfs, String path)
throws IOException {
return new FSDataOutputStream(dfs.create(path, true), null);
}
static final ClientProtocol mcp = Mockito.mock(ClientProtocol.class);
static public DFSClient createDFSClientAs(UserGroupInformation ugi,
final Configuration conf) throws Exception {
return ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
@Override
public DFSClient run() throws Exception {
return new DFSClient(null, mcp, conf, null);
}
});
}
}
| 15,767 | 38.223881 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.BackupNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.test.PathUtils;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.net.UnknownHostException;
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/**
* This test checks correctness of port usage by hdfs components:
* NameNode, DataNode, SecondaryNamenode and BackupNode.
*
* The correct behavior is:<br>
* - when a specific port is provided the server must either start on that port
* or fail by throwing {@link java.net.BindException}.<br>
* - if the port = 0 (ephemeral) then the server should choose
* a free port and start on it.
*/
public class TestHDFSServerPorts {
public static final Log LOG = LogFactory.getLog(TestHDFSServerPorts.class);
// reset default 0.0.0.0 addresses in order to avoid IPv6 problem
static final String THIS_HOST = getFullHostName() + ":0";
private static final File TEST_DATA_DIR = PathUtils.getTestDir(TestHDFSServerPorts.class);
static {
DefaultMetricsSystem.setMiniClusterMode(true);
}
Configuration config;
File hdfsDir;
/**
* Attempt to determine the fully qualified domain name for this host
* to compare during testing.
*
* This is necessary because in order for the BackupNode test to correctly
* work, the namenode must have its http server started with the fully
* qualified address, as this is the one the backupnode will attempt to start
* on as well.
*
* @return Fully qualified hostname, or 127.0.0.1 if can't determine
*/
public static String getFullHostName() {
try {
return DNS.getDefaultHost("default");
} catch (UnknownHostException e) {
LOG.warn("Unable to determine hostname. May interfere with obtaining " +
"valid test results.");
return "127.0.0.1";
}
}
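  /**
   * Illustrative sketch (not used by the tests) of the port semantics the
   * class javadoc describes: binding to port 0 asks the kernel for any free
   * port, while binding a second server to a busy, specific port raises
   * java.net.BindException. Plain ServerSocket is used here only to
   * demonstrate the contract that the HDFS daemons are expected to follow.
   */
  private static int demonstrateEphemeralBind() throws IOException {
    java.net.ServerSocket ss = new java.net.ServerSocket(0); // 0 = ephemeral
    try {
      return ss.getLocalPort(); // the concrete port the kernel picked
    } finally {
      ss.close();
    }
  }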
public NameNode startNameNode() throws IOException {
return startNameNode(false);
}
/**
* Start the namenode.
*/
public NameNode startNameNode(boolean withService) throws IOException {
hdfsDir = new File(TEST_DATA_DIR, "dfs");
if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
}
config = new HdfsConfiguration();
config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
fileAsURI(new File(hdfsDir, "name1")).toString());
FileSystem.setDefaultUri(config, "hdfs://" + THIS_HOST);
if (withService) {
NameNode.setServiceAddress(config, THIS_HOST);
}
config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
DFSTestUtil.formatNameNode(config);
String[] args = new String[] {};
// NameNode will modify config with the ports it bound to
return NameNode.createNameNode(args, config);
}
/**
* Start the BackupNode
*/
public BackupNode startBackupNode(Configuration conf) throws IOException {
// Set up testing environment directories
hdfsDir = new File(TEST_DATA_DIR, "backupNode");
if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
}
File currDir = new File(hdfsDir, "name2");
File currDir2 = new File(currDir, "current");
File currDir3 = new File(currDir, "image");
assertTrue(currDir.mkdirs());
assertTrue(currDir2.mkdirs());
assertTrue(currDir3.mkdirs());
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
fileAsURI(new File(hdfsDir, "name2")).toString());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
"${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
// Start BackupNode
String[] args = new String [] { StartupOption.BACKUP.getName() };
BackupNode bu = (BackupNode)NameNode.createNameNode(args, conf);
return bu;
}
/**
* Start the datanode.
*/
public DataNode startDataNode(int index, Configuration config)
throws IOException {
File dataNodeDir = new File(TEST_DATA_DIR, "data-" + index);
config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDir.getPath());
String[] args = new String[] {};
// NameNode will modify config with the ports it bound to
return DataNode.createDataNode(args, config);
}
/**
* Stop the datanode.
*/
public void stopDataNode(DataNode dn) {
if (dn != null) {
dn.shutdown();
}
}
public void stopNameNode(NameNode nn) {
if (nn != null) {
nn.stop();
}
}
public Configuration getConfig() {
return this.config;
}
/**
* Check whether the namenode can be started.
*/
private boolean canStartNameNode(Configuration conf) throws IOException {
NameNode nn2 = null;
try {
nn2 = NameNode.createNameNode(new String[]{}, conf);
} catch(IOException e) {
if (e instanceof java.net.BindException)
return false;
throw e;
} finally {
stopNameNode(nn2);
}
return true;
}
/**
* Check whether the datanode can be started.
*/
private boolean canStartDataNode(Configuration conf) throws IOException {
DataNode dn = null;
try {
dn = DataNode.createDataNode(new String[]{}, conf);
} catch(IOException e) {
if (e instanceof java.net.BindException)
return false;
throw e;
} finally {
if(dn != null) dn.shutdown();
}
return true;
}
/**
* Check whether the secondary name-node can be started.
*/
@SuppressWarnings("deprecation")
private boolean canStartSecondaryNode(Configuration conf) throws IOException {
// Using full name allows us not to have to add deprecation tag to
// entire source file.
org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode sn = null;
try {
sn = new org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode(conf);
} catch(IOException e) {
if (e instanceof java.net.BindException)
return false;
throw e;
} finally {
if(sn != null) sn.shutdown();
}
return true;
}
/**
* Check whether the BackupNode can be started.
*/
private boolean canStartBackupNode(Configuration conf) throws IOException {
BackupNode bn = null;
try {
bn = startBackupNode(conf);
} catch(IOException e) {
if (e instanceof java.net.BindException)
return false;
throw e;
} finally {
if(bn != null) bn.stop();
}
return true;
}
@Test(timeout = 300000)
public void testNameNodePorts() throws Exception {
runTestNameNodePorts(false);
runTestNameNodePorts(true);
}
/**
* Verify namenode port usage.
*/
public void runTestNameNodePorts(boolean withService) throws Exception {
NameNode nn = null;
try {
nn = startNameNode(withService);
// start another namenode on the same port
Configuration conf2 = new HdfsConfiguration(config);
conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
fileAsURI(new File(hdfsDir, "name2")).toString());
DFSTestUtil.formatNameNode(conf2);
boolean started = canStartNameNode(conf2);
assertFalse(started); // should fail
// start on a different main port
FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
started = canStartNameNode(conf2);
assertFalse(started); // should fail again
// reset conf2 since NameNode modifies it
FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
// different http port
conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
started = canStartNameNode(conf2);
if (withService) {
assertFalse("Should've failed on service port", started);
// reset conf2 since NameNode modifies it
FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
// Set Service address
conf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, THIS_HOST);
started = canStartNameNode(conf2);
}
assertTrue(started);
} finally {
stopNameNode(nn);
}
}
/**
* Verify datanode port usage.
*/
@Test(timeout = 300000)
public void testDataNodePorts() throws Exception {
NameNode nn = null;
try {
nn = startNameNode();
// start data-node on the same port as name-node
Configuration conf2 = new HdfsConfiguration(config);
conf2.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, new File(hdfsDir, "data").getPath());
conf2.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
FileSystem.getDefaultUri(config).getAuthority());
conf2.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, THIS_HOST);
boolean started = canStartDataNode(conf2);
assertFalse(started); // should fail
// bind http server to the same port as name-node
conf2.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, THIS_HOST);
conf2.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
started = canStartDataNode(conf2);
assertFalse(started); // should fail
// both ports are different from the name-node ones
conf2.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, THIS_HOST);
conf2.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, THIS_HOST);
conf2.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, THIS_HOST);
started = canStartDataNode(conf2);
assertTrue(started); // should start now
} finally {
stopNameNode(nn);
}
}
/**
* Verify secondary namenode port usage.
*/
@Test(timeout = 300000)
public void testSecondaryNodePorts() throws Exception {
NameNode nn = null;
try {
nn = startNameNode();
// bind http server to the same port as name-node
Configuration conf2 = new HdfsConfiguration(config);
conf2.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
LOG.info("= Starting 1 on: " +
conf2.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
boolean started = canStartSecondaryNode(conf2);
assertFalse(started); // should fail
// bind http server to a different port
conf2.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, THIS_HOST);
LOG.info("= Starting 2 on: " +
conf2.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
started = canStartSecondaryNode(conf2);
assertTrue(started); // should start now
} finally {
stopNameNode(nn);
}
}
/**
* Verify BackupNode port usage.
*/
@Test(timeout = 300000)
public void testBackupNodePorts() throws Exception {
NameNode nn = null;
try {
nn = startNameNode();
Configuration backup_config = new HdfsConfiguration(config);
backup_config.set(
DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST);
// bind http server to the same port as name-node
backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
backup_config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
LOG.info("= Starting 1 on: " + backup_config.get(
DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
assertFalse("Backup started on same port as Namenode",
canStartBackupNode(backup_config)); // should fail
// bind http server to a different port
backup_config.set(
DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, THIS_HOST);
LOG.info("= Starting 2 on: " + backup_config.get(
DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
boolean started = canStartBackupNode(backup_config);
assertTrue("Backup Namenode should've started", started); // should start now
} finally {
stopNameNode(nn);
}
}
}
| 13,481 | 33.045455 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.junit.Test;
/**
 * This test ensures that all types of datanode reports work correctly.
*/
public class TestDatanodeReport {
static final Log LOG = LogFactory.getLog(TestDatanodeReport.class);
final static private Configuration conf = new HdfsConfiguration();
final static private int NUM_OF_DATANODES = 4;
/**
   * This test exercises the different types of datanode reports.
*/
@Test
public void testDatanodeReport() throws Exception {
conf.setInt(
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
try {
//wait until the cluster is up
cluster.waitActive();
final String bpid = cluster.getNamesystem().getBlockPoolId();
final List<DataNode> datanodes = cluster.getDataNodes();
final DFSClient client = cluster.getFileSystem().dfs;
assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, bpid);
assertReports(NUM_OF_DATANODES, DatanodeReportType.LIVE, client, datanodes, bpid);
assertReports(0, DatanodeReportType.DEAD, client, datanodes, bpid);
// bring down one datanode
final DataNode last = datanodes.get(datanodes.size() - 1);
LOG.info("XXX shutdown datanode " + last.getDatanodeUuid());
last.shutdown();
DatanodeInfo[] nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
while (nodeInfo.length != 1) {
try {
Thread.sleep(500);
} catch (Exception e) {
}
nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
}
assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, null);
assertReports(NUM_OF_DATANODES - 1, DatanodeReportType.LIVE, client, datanodes, null);
assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
Thread.sleep(5000);
assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
} finally {
cluster.shutdown();
}
}
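  // Illustrative sketch (not called by the test): the same information is
  // reachable through the public DistributedFileSystem API, which forwards to
  // ClientProtocol#getDatanodeReport on the namenode, just like the DFSClient
  // calls used above.
  private static DatanodeInfo[] reportViaFileSystem(DistributedFileSystem dfs,
      DatanodeReportType type) throws IOException {
    return dfs.getDataNodeStats(type);
  }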
final static Comparator<StorageReport> CMP = new Comparator<StorageReport>() {
@Override
public int compare(StorageReport left, StorageReport right) {
return left.getStorage().getStorageID().compareTo(
right.getStorage().getStorageID());
}
};
static void assertReports(int numDatanodes, DatanodeReportType type,
DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
final DatanodeInfo[] infos = client.datanodeReport(type);
assertEquals(numDatanodes, infos.length);
final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
assertEquals(numDatanodes, reports.length);
for(int i = 0; i < infos.length; i++) {
assertEquals(infos[i], reports[i].getDatanodeInfo());
final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
if (bpid != null) {
//check storage
final StorageReport[] computed = reports[i].getStorageReports();
Arrays.sort(computed, CMP);
final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
Arrays.sort(expected, CMP);
assertEquals(expected.length, computed.length);
for(int j = 0; j < expected.length; j++) {
assertEquals(expected[j].getStorage().getStorageID(),
computed[j].getStorage().getStorageID());
}
}
}
}
static DataNode findDatanode(String id, List<DataNode> datanodes) {
for(DataNode d : datanodes) {
if (d.getDatanodeUuid().equals(id)) {
return d;
}
}
    throw new IllegalStateException("Datanode " + id + " not in datanode list: "
+ datanodes);
}
}
| 5,380 | 38.277372 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestPipelines {
public static final Log LOG = LogFactory.getLog(TestPipelines.class);
private static final short REPL_FACTOR = 3;
private static final int RAND_LIMIT = 2000;
private static final int FILE_SIZE = 10000;
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private static Configuration conf;
static final Random rand = new Random(RAND_LIMIT);
static {
initLoggers();
setConfiguration();
}
@Before
public void startUpCluster() throws IOException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
fs = cluster.getFileSystem();
}
@After
public void shutDownCluster() throws IOException {
if (fs != null)
fs.close();
if (cluster != null) {
cluster.shutdownDataNodes();
cluster.shutdown();
}
}
/**
   * Creates and closes a file of a certain length.
   * Calls append() so that the next write() operation adds to the end of it.
   * After the write() invocation, calls hflush() to make sure the data has
   * made it through the pipeline, and checks the state of the last block's
   * replica, which is expected to be in the RBW state.
*
* @throws IOException in case of an error
*/
@Test
public void pipeline_01() throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + METHOD_NAME);
}
Path filePath = new Path("/" + METHOD_NAME + ".dat");
DFSTestUtil.createFile(fs, filePath, FILE_SIZE, REPL_FACTOR, rand.nextLong());
if(LOG.isDebugEnabled()) {
LOG.debug("Invoking append but doing nothing otherwise...");
}
FSDataOutputStream ofs = fs.append(filePath);
ofs.writeBytes("Some more stuff to write");
((DFSOutputStream) ofs.getWrappedStream()).hflush();
List<LocatedBlock> lb = cluster.getNameNodeRpc().getBlockLocations(
filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();
String bpid = cluster.getNamesystem().getBlockPoolId();
for (DataNode dn : cluster.getDataNodes()) {
Replica r = DataNodeTestUtils.fetchReplicaInfo(dn, bpid, lb.get(0)
.getBlock().getBlockId());
assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
assertEquals("Should be RBW replica on " + dn
+ " after sequence of calls append()/write()/hflush()",
HdfsServerConstants.ReplicaState.RBW, r.getState());
}
ofs.close();
}
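  // A brief sketch of the replica states touched by pipeline_01 (the constants
  // are from HdfsServerConstants.ReplicaState; the one-line glosses are a
  // summary, not something this test asserts):
  //
  //   TEMPORARY  - replica being copied for replication or balancing
  //   RBW        - "replica being written": an open write pipeline, as above
  //   RWR        - replica waiting to be recovered after a datanode restart
  //   RUR        - replica under recovery
  //   FINALIZED  - complete, immutable replica of a closed block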
/**
   * These two test cases are already implemented by
   * {@link TestReadWhileWriting}.
*/
public void pipeline_02_03() {
}
static byte[] writeData(final FSDataOutputStream out, final int length)
throws IOException {
int bytesToWrite = length;
byte[] ret = new byte[bytesToWrite];
byte[] toWrite = new byte[1024];
int written = 0;
Random rb = new Random(rand.nextLong());
while (bytesToWrite > 0) {
rb.nextBytes(toWrite);
int bytesToWriteNext = (1024 < bytesToWrite) ? 1024 : bytesToWrite;
out.write(toWrite, 0, bytesToWriteNext);
System.arraycopy(toWrite, 0, ret, (ret.length - bytesToWrite),
bytesToWriteNext);
written += bytesToWriteNext;
if(LOG.isDebugEnabled()) {
LOG.debug("Written: " + bytesToWriteNext + "; Total: " + written);
}
bytesToWrite -= bytesToWriteNext;
}
return ret;
}
private static void setConfiguration() {
conf = new Configuration();
int customPerChecksumSize = 700;
int customBlockSize = customPerChecksumSize * 3;
conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 100);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, customBlockSize / 2);
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 0);
}
private static void initLoggers() {
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
}
}
| 5,798 | 34.359756 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Test;
/**
 * This class tests that writes in progress survive the death of
 * datanodes in the pipeline and that the resulting files are readable
 * and contain the expected data.
*/
public class TestDatanodeDeath {
{
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
GenericTestUtils.setLogLevel(InterDatanodeProtocol.LOG, Level.ALL);
}
static final int blockSize = 8192;
static final int numBlocks = 2;
static final int fileSize = numBlocks * blockSize + 1;
static final int numDatanodes = 15;
static final short replication = 3;
final int numberOfFiles = 3;
final int numThreads = 5;
Workload[] workload = null;
//
// an object that does a bunch of transactions
//
static class Workload extends Thread {
private final short replication;
private final int numberOfFiles;
private final int id;
private final FileSystem fs;
private long stamp;
private final long myseed;
Workload(long myseed, FileSystem fs, int threadIndex, int numberOfFiles,
short replication, long stamp) {
this.myseed = myseed;
id = threadIndex;
this.fs = fs;
this.numberOfFiles = numberOfFiles;
this.replication = replication;
this.stamp = stamp;
}
// create a bunch of files. Write to them and then verify.
@Override
public void run() {
System.out.println("Workload starting ");
for (int i = 0; i < numberOfFiles; i++) {
Path filename = new Path(id + "." + i);
try {
System.out.println("Workload processing file " + filename);
FSDataOutputStream stm = createFile(fs, filename, replication);
DFSOutputStream dfstream = (DFSOutputStream)
(stm.getWrappedStream());
dfstream.setArtificialSlowdown(1000);
writeFile(stm, myseed);
stm.close();
checkFile(fs, filename, replication, numBlocks, fileSize, myseed);
} catch (Throwable e) {
System.out.println("Workload exception " + e);
assertTrue(e.toString(), false);
}
// increment the stamp to indicate that another file is done.
synchronized (this) {
stamp++;
}
}
}
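    // The stamp counts files completed since the last reset. The Modify thread
    // waits until every worker's stamp is non-zero before killing another
    // datanode, then resets all stamps to zero.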
public synchronized void resetStamp() {
this.stamp = 0;
}
public synchronized long getStamp() {
return stamp;
}
}
//
// creates a file and returns a descriptor for writing to it.
//
static private FSDataOutputStream createFile(FileSystem fileSys, Path name, short repl)
throws IOException {
// create and write a file that contains three blocks of data
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl,
blockSize);
return stm;
}
//
// writes to file
//
static private void writeFile(FSDataOutputStream stm, long seed) throws IOException {
byte[] buffer = AppendTestUtil.randomBytes(seed, fileSize);
int mid = fileSize/2;
stm.write(buffer, 0, mid);
stm.write(buffer, mid, fileSize - mid);
}
//
// verify that the data written are sane
//
static private void checkFile(FileSystem fileSys, Path name, int repl,
int numblocks, int filesize, long seed)
throws IOException {
boolean done = false;
int attempt = 0;
long len = fileSys.getFileStatus(name).getLen();
assertTrue(name + " should be of size " + filesize +
" but found to be of size " + len,
len == filesize);
// wait till all full blocks are confirmed by the datanodes.
while (!done) {
attempt++;
try {
Thread.sleep(1000);
} catch (InterruptedException e) {}
done = true;
BlockLocation[] locations = fileSys.getFileBlockLocations(
fileSys.getFileStatus(name), 0, filesize);
if (locations.length < numblocks) {
if (attempt > 100) {
System.out.println("File " + name + " has only " +
locations.length + " blocks, " +
" but is expected to have " + numblocks +
" blocks.");
}
done = false;
continue;
}
for (int idx = 0; idx < locations.length; idx++) {
if (locations[idx].getHosts().length < repl) {
if (attempt > 100) {
System.out.println("File " + name + " has " +
locations.length + " blocks: " +
" The " + idx + " block has only " +
locations[idx].getHosts().length +
" replicas but is expected to have "
+ repl + " replicas.");
}
done = false;
break;
}
}
}
FSDataInputStream stm = fileSys.open(name);
final byte[] expected = AppendTestUtil.randomBytes(seed, fileSize);
// do a sanity check. Read the file
byte[] actual = new byte[filesize];
stm.readFully(0, actual);
checkData(actual, 0, expected, "Read 1");
}
private static void checkData(byte[] actual, int from, byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
                   expected[from+idx]+" actual "+actual[idx],
                   expected[from+idx], actual[idx]);
actual[idx] = 0;
}
}
/**
   * A class that kills one datanode and restarts it. It waits to
   * ensure that all workers have finished at least one file since the
* last kill of a datanode. This guarantees that all three replicas of
* a block do not get killed (otherwise the file will be corrupt and the
* test will fail).
*/
class Modify extends Thread {
volatile boolean running;
final MiniDFSCluster cluster;
final Configuration conf;
Modify(Configuration conf, MiniDFSCluster cluster) {
running = true;
this.cluster = cluster;
this.conf = conf;
}
@Override
public void run() {
while (running) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
continue;
}
// check if all threads have a new stamp.
// If so, then all workers have finished at least one file
// since the last stamp.
boolean loop = false;
for (int i = 0; i < numThreads; i++) {
if (workload[i].getStamp() == 0) {
loop = true;
break;
}
}
if (loop) {
continue;
}
// Now it is guaranteed that there will be at least one valid
// replica of a file.
for (int i = 0; i < replication - 1; i++) {
// pick a random datanode to shutdown
int victim = AppendTestUtil.nextInt(numDatanodes);
try {
          System.out.println("Restarting datanode " + victim);
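          // restartDataNode stops the chosen datanode and brings it back up,
          // so any write pipeline using it loses a node and must recover.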
cluster.restartDataNode(victim);
// cluster.startDataNodes(conf, 1, true, null, null);
} catch (IOException e) {
System.out.println("TestDatanodeDeath Modify exception " + e);
assertTrue("TestDatanodeDeath Modify exception " + e, false);
running = false;
}
}
// set a new stamp for all workers
for (int i = 0; i < numThreads; i++) {
workload[i].resetStamp();
}
}
}
// Make the thread exit.
void close() {
running = false;
this.interrupt();
}
}
/**
* Test that writing to files is good even when datanodes in the pipeline
* dies.
*/
private void complexTest() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 2);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDatanodes).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
Modify modThread = null;
try {
// Create threads and make them run workload concurrently.
workload = new Workload[numThreads];
for (int i = 0; i < numThreads; i++) {
workload[i] = new Workload(AppendTestUtil.nextLong(), fs, i, numberOfFiles, replication, 0);
workload[i].start();
}
// Create a thread that kills existing datanodes and creates new ones.
modThread = new Modify(conf, cluster);
modThread.start();
// wait for all transactions to get over
for (int i = 0; i < numThreads; i++) {
try {
System.out.println("Waiting for thread " + i + " to complete...");
workload[i].join();
// if most of the threads are done, then stop restarting datanodes.
if (i >= numThreads/2) {
modThread.close();
}
} catch (InterruptedException e) {
i--; // retry
}
}
} finally {
if (modThread != null) {
modThread.close();
try {
modThread.join();
} catch (InterruptedException e) {}
}
fs.close();
cluster.shutdown();
}
}
/**
* Write to one file, then kill one datanode in the pipeline and then
* close the file.
*/
private void simpleTest(int datanodeToKill) throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
int myMaxNodes = 5;
System.out.println("SimpleTest starting with DataNode to Kill " +
datanodeToKill);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(myMaxNodes).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
short repl = 3;
Path filename = new Path("simpletest.dat");
try {
// create a file and write one block of data
System.out.println("SimpleTest creating file " + filename);
FSDataOutputStream stm = createFile(fs, filename, repl);
DFSOutputStream dfstream = (DFSOutputStream)
(stm.getWrappedStream());
// these are test settings
dfstream.setChunksPerPacket(5);
final long myseed = AppendTestUtil.nextLong();
byte[] buffer = AppendTestUtil.randomBytes(myseed, fileSize);
int mid = fileSize/4;
stm.write(buffer, 0, mid);
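      // Only the first quarter of the file has been written; by now the write
      // pipeline should exist, and the rest is written after one of its
      // datanodes is stopped below.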
DatanodeInfo[] targets = dfstream.getPipeline();
int count = 5;
while (count-- > 0 && targets == null) {
try {
System.out.println("SimpleTest: Waiting for pipeline to be created.");
Thread.sleep(1000);
} catch (InterruptedException e) {
}
targets = dfstream.getPipeline();
}
if (targets == null) {
int victim = AppendTestUtil.nextInt(myMaxNodes);
System.out.println("SimpleTest stopping datanode random " + victim);
cluster.stopDataNode(victim);
} else {
int victim = datanodeToKill;
System.out.println("SimpleTest stopping datanode " + targets[victim]);
cluster.stopDataNode(targets[victim].getXferAddr());
}
System.out.println("SimpleTest stopping datanode complete");
// write some more data to file, close and verify
stm.write(buffer, mid, fileSize - mid);
stm.close();
checkFile(fs, filename, repl, numBlocks, fileSize, myseed);
} catch (Throwable e) {
System.out.println("Simple Workload exception " + e);
e.printStackTrace();
assertTrue(e.toString(), false);
} finally {
fs.close();
cluster.shutdown();
}
}
@Test
public void testSimple0() throws IOException {simpleTest(0);}
@Test
public void testSimple1() throws IOException {simpleTest(1);}
@Test
public void testSimple2() throws IOException {simpleTest(2);}
@Test
public void testComplex() throws IOException {complexTest();}
}
| 14,114 | 32.527316 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;
/**
* Regression test for HDFS-1542, a deadlock between the main thread
* and the DFSOutputStream.DataStreamer thread caused because
* Configuration.writeXML holds a lock on itself while writing to DFS.
*/
public class TestWriteConfigurationToDFS {
@Test(timeout=60000)
public void testWriteConf() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
System.out.println("Setting conf in: " + System.identityHashCode(conf));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
FileSystem fs = null;
OutputStream os = null;
try {
fs = cluster.getFileSystem();
Path filePath = new Path("/testWriteConf.xml");
os = fs.create(filePath);
StringBuilder longString = new StringBuilder();
for (int i = 0; i < 100000; i++) {
longString.append("hello");
} // 500KB
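      // The ~500KB value forces Configuration.writeXml to stream a lot of data
      // to DFS while holding its own lock, the pattern that deadlocked in
      // HDFS-1542.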
conf.set("foobar", longString.toString());
conf.writeXml(os);
os.close();
os = null;
fs.close();
fs = null;
} finally {
IOUtils.cleanup(null, os, fs);
cluster.shutdown();
}
}
}
| 2,214 | 35.311475 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Tests if a data-node can startup depending on configuration parameters.
*/
public class TestDatanodeConfig {
private static final File BASE_DIR =
new File(MiniDFSCluster.getBaseDirectory());
private static MiniDFSCluster cluster;
@BeforeClass
public static void setUp() throws Exception {
clearBaseDir();
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY, 0);
conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "localhost:0");
conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "localhost:0");
conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "localhost:0");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
}
@AfterClass
public static void tearDown() throws Exception {
if(cluster != null)
cluster.shutdown();
clearBaseDir();
}
private static void clearBaseDir() throws IOException {
if(BASE_DIR.exists() && ! FileUtil.fullyDelete(BASE_DIR))
throw new IOException("Cannot clear BASE_DIR " + BASE_DIR);
}
/**
* Test that a data-node does not start if configuration specifies
* incorrect URI scheme in data directory.
* Test that a data-node starts if data directory is specified as
* URI = "file:///path" or as a non URI path.
*/
@Test
public void testDataDirectories() throws IOException {
File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
Configuration conf = cluster.getConfiguration(0);
// 1. Test unsupported schema. Only "file:" is supported.
String dnDir = makeURI("shv", null, fileAsURI(dataDir).getPath());
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dnDir);
DataNode dn = null;
try {
dn = DataNode.createDataNode(new String[]{}, conf);
fail();
} catch(Exception e) {
// expecting exception here
} finally {
if (dn != null) {
dn.shutdown();
}
}
assertNull("Data-node startup should have failed.", dn);
// 2. Test "file:" schema and no schema (path-only). Both should work.
String dnDir1 = fileAsURI(dataDir).toString() + "1";
String dnDir2 = makeURI("file", "localhost",
fileAsURI(dataDir).getPath() + "2");
String dnDir3 = dataDir.getAbsolutePath() + "3";
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
dnDir1 + "," + dnDir2 + "," + dnDir3);
try {
cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
assertTrue("Data-node should startup.", cluster.isDataNodeUp());
} finally {
if (cluster != null) {
cluster.shutdownDataNodes();
}
}
}
private static String makeURI(String scheme, String host, String path)
throws IOException {
try {
URI uDir = new URI(scheme, host, path, null);
return uDir.toString();
} catch(URISyntaxException e) {
throw new IOException("Bad URI", e);
}
}
@Test(timeout=60000)
public void testMemlockLimit() throws Exception {
assumeTrue(NativeIO.isAvailable());
final long memlockLimit =
NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
// Can't increase the memlock limit past the maximum.
assumeTrue(memlockLimit != Long.MAX_VALUE);
File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
Configuration conf = cluster.getConfiguration(0);
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
makeURI("file", null, fileAsURI(dataDir).getPath()));
long prevLimit = conf.
getLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT);
DataNode dn = null;
try {
// Try starting the DN with limit configured to the ulimit
conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
memlockLimit);
dn = DataNode.createDataNode(new String[]{}, conf);
dn.shutdown();
dn = null;
// Try starting the DN with a limit > ulimit
conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
memlockLimit+1);
try {
dn = DataNode.createDataNode(new String[]{}, conf);
} catch (RuntimeException e) {
GenericTestUtils.assertExceptionContains(
"more than the datanode's available RLIMIT_MEMLOCK", e);
}
} finally {
if (dn != null) {
dn.shutdown();
}
conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
prevLimit);
}
}
}
| 6,120 | 35.218935 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.junit.Test;
/**
* This test ensures that the balancer bandwidth is dynamically adjusted
* correctly.
*/
public class TestBalancerBandwidth {
final static private Configuration conf = new Configuration();
final static private int NUM_OF_DATANODES = 2;
final static private int DEFAULT_BANDWIDTH = 1024*1024;
public static final Log LOG = LogFactory.getLog(TestBalancerBandwidth.class);
@Test
public void testBalancerBandwidth() throws Exception {
/* Set bandwidthPerSec to a low value of 1M bps. */
conf.setLong(
DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY,
DEFAULT_BANDWIDTH);
/* Create and start cluster */
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
try {
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
ArrayList<DataNode> datanodes = cluster.getDataNodes();
// Ensure value from the configuration is reflected in the datanodes.
assertEquals(DEFAULT_BANDWIDTH, (long) datanodes.get(0).getBalancerBandwidth());
assertEquals(DEFAULT_BANDWIDTH, (long) datanodes.get(1).getBalancerBandwidth());
// Dynamically change balancer bandwidth and ensure the updated value
// is reflected on the datanodes.
long newBandwidth = 12 * DEFAULT_BANDWIDTH; // 12M bps
fs.setBalancerBandwidth(newBandwidth);
      // Give it a few seconds to propagate the new value to the datanodes.
try {
Thread.sleep(5000);
} catch (Exception e) {}
assertEquals(newBandwidth, (long) datanodes.get(0).getBalancerBandwidth());
assertEquals(newBandwidth, (long) datanodes.get(1).getBalancerBandwidth());
// Dynamically change balancer bandwidth to 0. Balancer bandwidth on the
// datanodes should remain as it was.
fs.setBalancerBandwidth(0);
      // Give it a few seconds to propagate the new value to the datanodes.
try {
Thread.sleep(5000);
} catch (Exception e) {}
assertEquals(newBandwidth, (long) datanodes.get(0).getBalancerBandwidth());
assertEquals(newBandwidth, (long) datanodes.get(1).getBalancerBandwidth());
}finally {
cluster.shutdown();
}
}
public static void main(String[] args) throws Exception {
new TestBalancerBandwidth().testBalancerBandwidth();
}
}
| 3,462 | 36.236559 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import java.net.InetSocketAddress;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.junit.Test;
/** Test NameNode port defaulting code. */
public class TestDefaultNameNodePort {
@Test
public void testGetAddressFromString() throws Exception {
assertEquals(NameNode.getAddress("foo").getPort(),
NameNode.DEFAULT_PORT);
assertEquals(NameNode.getAddress("hdfs://foo/").getPort(),
NameNode.DEFAULT_PORT);
assertEquals(NameNode.getAddress("hdfs://foo:555").getPort(),
555);
assertEquals(NameNode.getAddress("foo:555").getPort(),
555);
}
@Test
public void testGetAddressFromConf() throws Exception {
Configuration conf = new HdfsConfiguration();
FileSystem.setDefaultUri(conf, "hdfs://foo/");
assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
assertEquals(NameNode.getAddress(conf).getPort(), 555);
FileSystem.setDefaultUri(conf, "foo");
assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
}
@Test
public void testGetUri() {
assertEquals(NameNode.getUri(new InetSocketAddress("foo", 555)),
URI.create("hdfs://foo:555"));
assertEquals(NameNode.getUri(new InetSocketAddress("foo",
NameNode.DEFAULT_PORT)),
URI.create("hdfs://foo"));
}
}
| 2,467 | 36.969231 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* This test ensures the appropriate response (successful or failure) from
* the system when the system is started under various storage state and
* version conditions.
*/
public class TestDFSStorageStateRecovery {
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestDFSStorageStateRecovery");
private Configuration conf = null;
private int testCounter = 0;
private MiniDFSCluster cluster = null;
// Constants for indexes into test case table below.
private static final int CURRENT_EXISTS = 0;
private static final int PREVIOUS_EXISTS = 1;
private static final int PREVIOUS_TMP_EXISTS = 2;
private static final int REMOVED_TMP_EXISTS = 3;
private static final int SHOULD_RECOVER = 4;
private static final int CURRENT_SHOULD_EXIST_AFTER_RECOVER = 5;
private static final int PREVIOUS_SHOULD_EXIST_AFTER_RECOVER = 6;
/**
* The test case table. Each row represents a test case. This table is
   * taken from the table in Appendix A of the HDFS Upgrade Test Plan
* (TestPlan-HdfsUpgrade.html) attached to
* http://issues.apache.org/jira/browse/HADOOP-702
*
* It has been slightly modified since previouscheckpoint.tmp no longer
* exists.
*
* The column meanings are:
* 0) current directory exists
* 1) previous directory exists
* 2) previous.tmp directory exists
* 3) removed.tmp directory exists
* 4) node should recover and startup
* 5) current directory should exist after recovery but before startup
* 6) previous directory should exist after recovery but before startup
*/
static final boolean[][] testCases = new boolean[][] {
new boolean[] {true, false, false, false, true, true, false}, // 1
new boolean[] {true, true, false, false, true, true, true }, // 2
new boolean[] {true, false, true, false, true, true, true }, // 3
new boolean[] {true, true, true, true, false, false, false}, // 4
new boolean[] {true, true, true, false, false, false, false}, // 4
new boolean[] {false, true, true, true, false, false, false}, // 4
new boolean[] {false, true, true, false, false, false, false}, // 4
new boolean[] {false, false, false, false, false, false, false}, // 5
new boolean[] {false, true, false, false, false, false, false}, // 6
new boolean[] {false, false, true, false, true, true, false}, // 7
new boolean[] {true, false, false, true, true, true, false}, // 8
new boolean[] {true, true, false, true, false, false, false}, // 9
new boolean[] {true, true, true, true, false, false, false}, // 10
new boolean[] {true, false, true, true, false, false, false}, // 10
new boolean[] {false, true, true, true, false, false, false}, // 10
new boolean[] {false, false, true, true, false, false, false}, // 10
new boolean[] {false, false, false, true, false, false, false}, // 11
new boolean[] {false, true, false, true, true, true, true }, // 12
// name-node specific cases
new boolean[] {true, true, false, false, true, true, false}, // 13
};
private static final int NUM_NN_TEST_CASES = testCases.length;
private static final int NUM_DN_TEST_CASES = 18;
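  // The last row of testCases is name-node specific, so the datanode and
  // block-pool tests iterate only over the first 18 rows.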
/**
   * Writes an INFO log message containing the parameters and the
   * full contents of the state array.
*/
void log(String label, int numDirs, int testCaseNum, boolean[] state) {
LOG.info("============================================================");
LOG.info("***TEST " + (testCounter++) + "*** "
+ label + ":"
+ " numDirs="+numDirs
+ " testCase="+testCaseNum
+ " current="+state[CURRENT_EXISTS]
+ " previous="+state[PREVIOUS_EXISTS]
+ " previous.tmp="+state[PREVIOUS_TMP_EXISTS]
+ " removed.tmp="+state[REMOVED_TMP_EXISTS]
+ " should recover="+state[SHOULD_RECOVER]
+ " current exists after="+state[CURRENT_SHOULD_EXIST_AFTER_RECOVER]
+ " previous exists after="+state[PREVIOUS_SHOULD_EXIST_AFTER_RECOVER]);
}
/**
* Sets up the storage directories for namenode as defined by
* {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY}. For each element
* in {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY}, the subdirectories
* represented by the first four elements of the <code>state</code> array
* will be created and populated.
*
* See {@link UpgradeUtilities#createNameNodeStorageDirs()}
*
* @param state
* a row from the testCases table which indicates which directories
* to setup for the node
* @return file paths representing namenode storage directories
*/
String[] createNameNodeStorageState(boolean[] state) throws Exception {
String[] baseDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
UpgradeUtilities.createEmptyDirs(baseDirs);
if (state[CURRENT_EXISTS]) // current
UpgradeUtilities.createNameNodeStorageDirs(baseDirs, "current");
if (state[PREVIOUS_EXISTS]) // previous
UpgradeUtilities.createNameNodeStorageDirs(baseDirs, "previous");
if (state[PREVIOUS_TMP_EXISTS]) // previous.tmp
UpgradeUtilities.createNameNodeStorageDirs(baseDirs, "previous.tmp");
if (state[REMOVED_TMP_EXISTS]) // removed.tmp
UpgradeUtilities.createNameNodeStorageDirs(baseDirs, "removed.tmp");
return baseDirs;
}
/**
* Sets up the storage directories for a datanode under
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY}. For each element in
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY}, the subdirectories
* represented by the first four elements of the <code>state</code> array
* will be created and populated.
* See {@link UpgradeUtilities#createDataNodeStorageDirs()}
*
* @param state
* a row from the testCases table which indicates which directories
* to setup for the node
* @return file paths representing datanode storage directories
*/
String[] createDataNodeStorageState(boolean[] state) throws Exception {
String[] baseDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
UpgradeUtilities.createEmptyDirs(baseDirs);
if (state[CURRENT_EXISTS]) // current
UpgradeUtilities.createDataNodeStorageDirs(baseDirs, "current");
if (state[PREVIOUS_EXISTS]) // previous
UpgradeUtilities.createDataNodeStorageDirs(baseDirs, "previous");
if (state[PREVIOUS_TMP_EXISTS]) // previous.tmp
UpgradeUtilities.createDataNodeStorageDirs(baseDirs, "previous.tmp");
if (state[REMOVED_TMP_EXISTS]) // removed.tmp
UpgradeUtilities.createDataNodeStorageDirs(baseDirs, "removed.tmp");
return baseDirs;
}
/**
* Sets up the storage directories for a block pool under
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY}. For each element
* in {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY}, the subdirectories
* represented by the first four elements of the <code>state</code> array
* will be created and populated.
* See {@link UpgradeUtilities#createBlockPoolStorageDirs()}
*
* @param bpid block pool Id
* @param state
* a row from the testCases table which indicates which directories
* to setup for the node
* @return file paths representing block pool storage directories
*/
String[] createBlockPoolStorageState(String bpid, boolean[] state) throws Exception {
String[] baseDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
UpgradeUtilities.createEmptyDirs(baseDirs);
UpgradeUtilities.createDataNodeStorageDirs(baseDirs, "current");
// After copying the storage directories from master datanode, empty
// the block pool storage directories
String[] bpDirs = UpgradeUtilities.createEmptyBPDirs(baseDirs, bpid);
if (state[CURRENT_EXISTS]) // current
UpgradeUtilities.createBlockPoolStorageDirs(baseDirs, "current", bpid);
if (state[PREVIOUS_EXISTS]) // previous
UpgradeUtilities.createBlockPoolStorageDirs(baseDirs, "previous", bpid);
if (state[PREVIOUS_TMP_EXISTS]) // previous.tmp
UpgradeUtilities.createBlockPoolStorageDirs(baseDirs, "previous.tmp",
bpid);
if (state[REMOVED_TMP_EXISTS]) // removed.tmp
UpgradeUtilities
.createBlockPoolStorageDirs(baseDirs, "removed.tmp", bpid);
return bpDirs;
}
/**
* For NameNode, verify that the current and/or previous exist as indicated by
* the method parameters. If previous exists, verify that
   * it hasn't been modified by comparing the checksums of all the files
   * it contains with their original checksums. It is assumed that
* the server has recovered.
*/
void checkResultNameNode(String[] baseDirs,
boolean currentShouldExist, boolean previousShouldExist)
throws IOException
{
if (currentShouldExist) {
for (int i = 0; i < baseDirs.length; i++) {
assertTrue(new File(baseDirs[i],"current").isDirectory());
assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
assertNotNull(FSImageTestUtil.findNewestImageFile(
baseDirs[i] + "/current"));
assertTrue(new File(baseDirs[i],"current/seen_txid").isFile());
}
}
if (previousShouldExist) {
for (int i = 0; i < baseDirs.length; i++) {
assertTrue(new File(baseDirs[i],"previous").isDirectory());
assertEquals(
UpgradeUtilities.checksumContents(
NAME_NODE, new File(baseDirs[i],"previous"), false),
UpgradeUtilities.checksumMasterNameNodeContents());
}
}
}
/**
* For datanode, verify that the current and/or previous exist as indicated by
* the method parameters. If previous exists, verify that
   * it hasn't been modified by comparing the checksums of all the files
   * it contains with their original checksums. It is assumed that
* the server has recovered.
*/
void checkResultDataNode(String[] baseDirs,
boolean currentShouldExist, boolean previousShouldExist)
throws IOException
{
if (currentShouldExist) {
for (int i = 0; i < baseDirs.length; i++) {
assertEquals(
UpgradeUtilities.checksumContents(DATA_NODE,
new File(baseDirs[i],"current"), false),
UpgradeUtilities.checksumMasterDataNodeContents());
}
}
if (previousShouldExist) {
for (int i = 0; i < baseDirs.length; i++) {
assertTrue(new File(baseDirs[i],"previous").isDirectory());
assertEquals(
UpgradeUtilities.checksumContents(DATA_NODE,
new File(baseDirs[i],"previous"), false),
UpgradeUtilities.checksumMasterDataNodeContents());
}
}
}
/**
* For block pool, verify that the current and/or previous exist as indicated
* by the method parameters. If previous exists, verify that
   * it hasn't been modified by comparing the checksums of all the files
   * it contains with their original checksums. It is assumed that
* the server has recovered.
* @param baseDirs directories pointing to block pool storage
* @param bpid block pool Id
* @param currentShouldExist current directory exists under storage
   * @param previousShouldExist previous directory exists under storage
*/
void checkResultBlockPool(String[] baseDirs, boolean currentShouldExist,
boolean previousShouldExist) throws IOException
{
if (currentShouldExist) {
for (int i = 0; i < baseDirs.length; i++) {
File bpCurDir = new File(baseDirs[i], Storage.STORAGE_DIR_CURRENT);
assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurDir,
false), UpgradeUtilities.checksumMasterBlockPoolContents());
}
}
if (previousShouldExist) {
for (int i = 0; i < baseDirs.length; i++) {
File bpPrevDir = new File(baseDirs[i], Storage.STORAGE_DIR_PREVIOUS);
assertTrue(bpPrevDir.isDirectory());
assertEquals(
UpgradeUtilities.checksumContents(DATA_NODE, bpPrevDir,
false), UpgradeUtilities.checksumMasterBlockPoolContents());
}
}
}
private MiniDFSCluster createCluster(Configuration c) throws IOException {
return new MiniDFSCluster.Builder(c)
.numDataNodes(0)
.startupOption(StartupOption.REGULAR)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.build();
}
/**
* This test iterates over the testCases table and attempts
* to startup the NameNode normally.
*/
@Test
public void testNNStorageStates() throws Exception {
String[] baseDirs;
for (int numDirs = 1; numDirs <= 2; numDirs++) {
conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
for (int i = 0; i < NUM_NN_TEST_CASES; i++) {
boolean[] testCase = testCases[i];
boolean shouldRecover = testCase[SHOULD_RECOVER];
boolean curAfterRecover = testCase[CURRENT_SHOULD_EXIST_AFTER_RECOVER];
boolean prevAfterRecover = testCase[PREVIOUS_SHOULD_EXIST_AFTER_RECOVER];
log("NAME_NODE recovery", numDirs, i, testCase);
baseDirs = createNameNodeStorageState(testCase);
if (shouldRecover) {
cluster = createCluster(conf);
checkResultNameNode(baseDirs, curAfterRecover, prevAfterRecover);
cluster.shutdown();
} else {
try {
cluster = createCluster(conf);
throw new AssertionError("NameNode should have failed to start");
} catch (IOException expected) {
// the exception is expected
// check that the message says "not formatted"
// when storage directory is empty (case #5)
if(!testCases[i][CURRENT_EXISTS] && !testCases[i][PREVIOUS_TMP_EXISTS]
&& !testCases[i][PREVIOUS_EXISTS] && !testCases[i][REMOVED_TMP_EXISTS]) {
assertTrue(expected.getLocalizedMessage().contains(
"NameNode is not formatted"));
}
}
}
cluster.shutdown();
} // end testCases loop
} // end numDirs loop
}
/**
* This test iterates over the testCases table for Datanode storage and
* attempts to startup the DataNode normally.
*/
@Test
public void testDNStorageStates() throws Exception {
String[] baseDirs;
// First setup the datanode storage directory
for (int numDirs = 1; numDirs <= 2; numDirs++) {
conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
for (int i = 0; i < NUM_DN_TEST_CASES; i++) {
boolean[] testCase = testCases[i];
boolean shouldRecover = testCase[SHOULD_RECOVER];
boolean curAfterRecover = testCase[CURRENT_SHOULD_EXIST_AFTER_RECOVER];
boolean prevAfterRecover = testCase[PREVIOUS_SHOULD_EXIST_AFTER_RECOVER];
log("DATA_NODE recovery", numDirs, i, testCase);
createNameNodeStorageState(new boolean[] { true, true, false, false,
false });
cluster = createCluster(conf);
baseDirs = createDataNodeStorageState(testCase);
if (!testCase[CURRENT_EXISTS] && !testCase[PREVIOUS_EXISTS] && !testCase[PREVIOUS_TMP_EXISTS] && !testCase[REMOVED_TMP_EXISTS]) {
// DataNode will create and format current if no directories exist
cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
} else {
if (shouldRecover) {
cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
checkResultDataNode(baseDirs, curAfterRecover, prevAfterRecover);
} else {
cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
assertFalse(cluster.getDataNodes().get(0).isDatanodeUp());
}
}
cluster.shutdown();
} // end testCases loop
} // end numDirs loop
}
/**
* This test iterates over the testCases table for block pool storage and
* attempts to startup the DataNode normally.
*/
@Test
public void testBlockPoolStorageStates() throws Exception {
String[] baseDirs;
// First setup the datanode storage directory
String bpid = UpgradeUtilities.getCurrentBlockPoolID(null);
for (int numDirs = 1; numDirs <= 2; numDirs++) {
conf = new HdfsConfiguration();
conf.setInt("dfs.datanode.scan.period.hours", -1);
conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
for (int i = 0; i < NUM_DN_TEST_CASES; i++) {
boolean[] testCase = testCases[i];
boolean shouldRecover = testCase[SHOULD_RECOVER];
boolean curAfterRecover = testCase[CURRENT_SHOULD_EXIST_AFTER_RECOVER];
boolean prevAfterRecover = testCase[PREVIOUS_SHOULD_EXIST_AFTER_RECOVER];
log("BLOCK_POOL recovery", numDirs, i, testCase);
createNameNodeStorageState(new boolean[] { true, true, false, false,
false });
cluster = createCluster(conf);
baseDirs = createBlockPoolStorageState(bpid, testCase);
if (!testCase[CURRENT_EXISTS] && !testCase[PREVIOUS_EXISTS] && !testCase[PREVIOUS_TMP_EXISTS] && !testCase[REMOVED_TMP_EXISTS]) {
// DataNode will create and format current if no directories exist
cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
} else {
if (shouldRecover) {
cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
checkResultBlockPool(baseDirs, curAfterRecover, prevAfterRecover);
} else {
cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
assertFalse(cluster.getDataNodes().get(0).isBPServiceAlive(bpid));
}
}
cluster.shutdown();
} // end testCases loop
} // end numDirs loop
}
@Before
public void setUp() throws Exception {
LOG.info("Setting up the directory structures.");
UpgradeUtilities.initialize();
}
@After
public void tearDown() throws Exception {
LOG.info("Shutting down MiniDFSCluster");
if (cluster != null) cluster.shutdown();
}
}
| 20,317 | 43.459519 | 137 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Time;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
/**
* Test cases for trying to append to a file with a different
* checksum than the file was originally written with.
*/
public class TestAppendDifferentChecksum {
private static final int SEGMENT_LENGTH = 1500;
// run the randomized test for 5 seconds
private static final long RANDOM_TEST_RUNTIME = 5000;
private static MiniDFSCluster cluster;
private static FileSystem fs;
@BeforeClass
public static void setupCluster() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
conf.set("fs.hdfs.impl.disable.cache", "true");
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1)
.build();
fs = cluster.getFileSystem();
}
@AfterClass
public static void teardown() throws IOException {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* This test does not run, since switching chunksize with append
* is not implemented. Please see HDFS-2130 for a discussion of the
* difficulties in doing so.
*/
@Test
@Ignore("this is not implemented! See HDFS-2130")
public void testSwitchChunkSize() throws IOException {
FileSystem fsWithSmallChunk = createFsWithChecksum("CRC32", 512);
FileSystem fsWithBigChunk = createFsWithChecksum("CRC32", 1024);
Path p = new Path("/testSwitchChunkSize");
appendWithTwoFs(p, fsWithSmallChunk, fsWithBigChunk);
AppendTestUtil.check(fsWithSmallChunk, p, SEGMENT_LENGTH * 2);
AppendTestUtil.check(fsWithBigChunk, p, SEGMENT_LENGTH * 2);
}
/**
* Simple unit test which writes some data with one algorithm,
* then appends with another.
*/
@Test
public void testSwitchAlgorithms() throws IOException {
FileSystem fsWithCrc32 = createFsWithChecksum("CRC32", 512);
FileSystem fsWithCrc32C = createFsWithChecksum("CRC32C", 512);
Path p = new Path("/testSwitchAlgorithms");
appendWithTwoFs(p, fsWithCrc32, fsWithCrc32C);
// Regardless of which FS is used to read, it should pick up
// the on-disk checksum!
AppendTestUtil.check(fsWithCrc32C, p, SEGMENT_LENGTH * 2);
AppendTestUtil.check(fsWithCrc32, p, SEGMENT_LENGTH * 2);
}
/**
* Test which randomly alternates between appending with
* CRC32 and with CRC32C, crossing several block boundaries.
* Then, checks that all of the data can be read back correct.
*/
@Test(timeout=RANDOM_TEST_RUNTIME*2)
public void testAlgoSwitchRandomized() throws IOException {
FileSystem fsWithCrc32 = createFsWithChecksum("CRC32", 512);
FileSystem fsWithCrc32C = createFsWithChecksum("CRC32C", 512);
Path p = new Path("/testAlgoSwitchRandomized");
long seed = Time.now();
System.out.println("seed: " + seed);
Random r = new Random(seed);
// Create empty to start
IOUtils.closeStream(fsWithCrc32.create(p));
long st = Time.now();
int len = 0;
while (Time.now() - st < RANDOM_TEST_RUNTIME) {
int thisLen = r.nextInt(500);
FileSystem fs = (r.nextBoolean() ? fsWithCrc32 : fsWithCrc32C);
FSDataOutputStream stm = fs.append(p);
try {
AppendTestUtil.write(stm, len, thisLen);
} finally {
stm.close();
}
len += thisLen;
}
AppendTestUtil.check(fsWithCrc32, p, len);
AppendTestUtil.check(fsWithCrc32C, p, len);
}
private FileSystem createFsWithChecksum(String type, int bytes)
throws IOException {
Configuration conf = new Configuration(fs.getConf());
conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, type);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, bytes);
return FileSystem.get(conf);
}
private void appendWithTwoFs(Path p, FileSystem fs1, FileSystem fs2)
throws IOException {
FSDataOutputStream stm = fs1.create(p);
try {
AppendTestUtil.write(stm, 0, SEGMENT_LENGTH);
} finally {
stm.close();
}
stm = fs2.append(p);
try {
AppendTestUtil.write(stm, SEGMENT_LENGTH, SEGMENT_LENGTH);
} finally {
stm.close();
}
}
}
| 5,322 | 32.062112 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSClientAdapter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
public class DFSClientAdapter {
public static DFSClient getDFSClient(DistributedFileSystem dfs) {
return dfs.dfs;
}
public static void setDFSClient(DistributedFileSystem dfs, DFSClient client) {
dfs.dfs = client;
}
public static void stopLeaseRenewer(DistributedFileSystem dfs) throws IOException {
try {
dfs.dfs.getLeaseRenewer().interruptAndJoin();
} catch (InterruptedException e) {
throw new IOException(e);
}
}
public static LocatedBlocks callGetBlockLocations(ClientProtocol namenode,
String src, long start, long length) throws IOException {
return DFSClient.callGetBlockLocations(namenode, src, start, length);
}
public static ClientProtocol getNamenode(DFSClient client) throws IOException {
return client.namenode;
}
public static DFSClient getClient(DistributedFileSystem dfs)
throws IOException {
return dfs.dfs;
}
public static ExtendedBlock getPreviousBlock(DFSClient client, long fileId) {
return client.getPreviousBlock(fileId);
}
public static long getFileId(DFSOutputStream out) {
return out.getFileId();
}
}
| 2,169 | 32.384615 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.util.EnumSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestWriteRead {
// Junit test settings.
private static final int WR_NTIMES = 350;
private static final int WR_CHUNK_SIZE = 10000;
private static final int BUFFER_SIZE = 8192 * 100;
private static final String ROOT_DIR = "/tmp/";
private static final long blockSize = 1024*100;
// command-line options. Different defaults for unit test vs real cluster
String filenameOption = ROOT_DIR + "fileX1";
int chunkSizeOption = 10000;
int loopOption = 10;
private MiniDFSCluster cluster;
private Configuration conf; // = new HdfsConfiguration();
private FileSystem mfs; // = cluster.getFileSystem();
private FileContext mfc; // = FileContext.getFileContext();
// configuration
private boolean useFCOption = false; // use either FileSystem or FileContext
private boolean verboseOption = true;
private boolean positionReadOption = false;
private boolean truncateOption = false;
private final boolean abortTestOnFailure = true;
static private Log LOG = LogFactory.getLog(TestWriteRead.class);
@Before
public void initJunitModeTest() throws Exception {
LOG.info("initJunitModeTest");
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); // 100K
// blocksize
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
mfs = cluster.getFileSystem();
mfc = FileContext.getFileContext();
Path rootdir = new Path(ROOT_DIR);
mfs.mkdirs(rootdir);
}
@After
public void shutdown() {
cluster.shutdown();
}
  // Equivalent of @Before for cluster mode testing.
private void initClusterModeTest() throws IOException {
LOG = LogFactory.getLog(TestWriteRead.class);
LOG.info("initClusterModeTest");
conf = new Configuration();
mfc = FileContext.getFileContext();
mfs = FileSystem.get(conf);
}
/** Junit Test reading while writing. */
@Test
public void testWriteReadSeq() throws IOException {
useFCOption = false;
positionReadOption = false;
String fname = filenameOption;
long rdBeginPos = 0;
    // need to run long enough to fail: takes 25 to 35 sec on Mac
int stat = testWriteAndRead(fname, WR_NTIMES, WR_CHUNK_SIZE, rdBeginPos);
LOG.info("Summary status from test1: status= " + stat);
Assert.assertEquals(0, stat);
}
/** Junit Test position read while writing. */
@Test
public void testWriteReadPos() throws IOException {
String fname = filenameOption;
positionReadOption = true; // position read
long rdBeginPos = 0;
int stat = testWriteAndRead(fname, WR_NTIMES, WR_CHUNK_SIZE, rdBeginPos);
Assert.assertEquals(0, stat);
}
/** Junit Test position read of the current block being written. */
@Test
public void testReadPosCurrentBlock() throws IOException {
String fname = filenameOption;
positionReadOption = true; // position read
int wrChunkSize = (int)(blockSize) + (int)(blockSize/2);
long rdBeginPos = blockSize+1;
int numTimes=5;
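    // Each write spans a block boundary (1.5 blocks), and the read starts just
    // past the first block, inside the block still under construction.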
int stat = testWriteAndRead(fname, numTimes, wrChunkSize, rdBeginPos);
Assert.assertEquals(0, stat);
}
// equivalent of TestWriteRead1
private int clusterTestWriteRead1() throws IOException {
long rdBeginPos = 0;
int stat = testWriteAndRead(filenameOption, loopOption, chunkSizeOption, rdBeginPos);
return stat;
}
/**
   * Open the file to read from beginning to end. Then close the file.
   * Returns the number of bytes read.
   * Supports both sequential and position reads.
*/
private long readData(String fname, byte[] buffer, long byteExpected, long beginPosition)
throws IOException {
long totalByteRead = 0;
Path path = getFullyQualifiedPath(fname);
FSDataInputStream in = null;
try {
in = openInputStream(path);
long visibleLenFromReadStream = ((HdfsDataInputStream)in).getVisibleLength();
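      // The visible length is the portion of the file a reader is guaranteed
      // to see, including data already hflush'ed into the block under
      // construction.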
if (visibleLenFromReadStream < byteExpected)
{
throw new IOException(visibleLenFromReadStream
+ " = visibleLenFromReadStream < bytesExpected= "
+ byteExpected);
}
totalByteRead = readUntilEnd(in, buffer, buffer.length, fname,
beginPosition, visibleLenFromReadStream, positionReadOption);
in.close();
      // reading more data than visibleLen is OK, but not less
if (totalByteRead + beginPosition < byteExpected ){
throw new IOException("readData mismatch in byte read: expected="
+ byteExpected + " ; got " + (totalByteRead + beginPosition));
}
return totalByteRead + beginPosition;
} catch (IOException e) {
throw new IOException("##### Caught Exception in readData. "
+ "Total Byte Read so far = " + totalByteRead + " beginPosition = "
+ beginPosition, e);
} finally {
if (in != null)
in.close();
}
}
/**
   * Read chunks into the buffer repeatedly until a total of visibleLen bytes
   * have been read. Returns the total number of bytes read.
*/
private long readUntilEnd(FSDataInputStream in, byte[] buffer, long size,
String fname, long pos, long visibleLen, boolean positionReadOption)
throws IOException {
if (pos >= visibleLen || visibleLen <= 0)
return 0;
int chunkNumber = 0;
long totalByteRead = 0;
long currentPosition = pos;
int byteRead = 0;
long byteLeftToRead = visibleLen - pos;
int byteToReadThisRound = 0;
if (!positionReadOption) {
in.seek(pos);
currentPosition = in.getPos();
}
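    // Sequential reads position the stream once up front; positional reads
    // pass the offset explicitly on every read call instead.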
if (verboseOption)
LOG.info("reader begin: position: " + pos + " ; currentOffset = "
+ currentPosition + " ; bufferSize =" + buffer.length
+ " ; Filename = " + fname);
try {
while (byteLeftToRead > 0 && currentPosition < visibleLen) {
byteToReadThisRound = (int) (byteLeftToRead >= buffer.length
? buffer.length : byteLeftToRead);
if (positionReadOption) {
byteRead = in.read(currentPosition, buffer, 0, byteToReadThisRound);
} else {
byteRead = in.read(buffer, 0, byteToReadThisRound);
}
if (byteRead <= 0)
break;
chunkNumber++;
totalByteRead += byteRead;
currentPosition += byteRead;
byteLeftToRead -= byteRead;
if (verboseOption) {
LOG.info("reader: Number of byte read: " + byteRead
+ " ; totalByteRead = " + totalByteRead + " ; currentPosition="
+ currentPosition + " ; chunkNumber =" + chunkNumber
+ "; File name = " + fname);
}
}
} catch (IOException e) {
throw new IOException(
"#### Exception caught in readUntilEnd: reader currentOffset = "
+ currentPosition + " ; totalByteRead =" + totalByteRead
+ " ; latest byteRead = " + byteRead + "; visibleLen= "
+ visibleLen + " ; bufferLen = " + buffer.length
+ " ; Filename = " + fname, e);
}
if (verboseOption)
LOG.info("reader end: position: " + pos + " ; currentOffset = "
+ currentPosition + " ; totalByteRead =" + totalByteRead
+ " ; Filename = " + fname);
return totalByteRead;
}
private void writeData(FSDataOutputStream out, byte[] buffer, int length)
throws IOException {
int totalByteWritten = 0;
int remainToWrite = length;
while (remainToWrite > 0) {
int toWriteThisRound = remainToWrite > buffer.length ? buffer.length
: remainToWrite;
out.write(buffer, 0, toWriteThisRound);
totalByteWritten += toWriteThisRound;
remainToWrite -= toWriteThisRound;
}
if (totalByteWritten != length) {
throw new IOException("WriteData: failure in write. Attempt to write "
+ length + " ; written=" + totalByteWritten);
}
}
/**
   * Common routine to do a position read while the file is open for write.
   * After each write iteration, read the file from beginning to end.
   * Returns 0 on success, otherwise the number of failures.
*/
private int testWriteAndRead(String fname, int loopN, int chunkSize, long readBeginPosition)
throws IOException {
int countOfFailures = 0;
long byteVisibleToRead = 0;
FSDataOutputStream out = null;
byte[] outBuffer = new byte[BUFFER_SIZE];
byte[] inBuffer = new byte[BUFFER_SIZE];
for (int i = 0; i < BUFFER_SIZE; i++) {
outBuffer[i] = (byte) (i & 0x00ff);
}
try {
Path path = getFullyQualifiedPath(fname);
long fileLengthBeforeOpen = 0;
if (ifExists(path)) {
if (truncateOption) {
          out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.OVERWRITE))
              : mfs.create(path, truncateOption);
LOG.info("File already exists. File open with Truncate mode: "+ path);
} else {
out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.APPEND))
: mfs.append(path);
fileLengthBeforeOpen = getFileLengthFromNN(path);
LOG.info("File already exists of size " + fileLengthBeforeOpen
+ " File open for Append mode: " + path);
}
} else {
out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.CREATE))
: mfs.create(path);
}
long totalByteWritten = fileLengthBeforeOpen;
long totalByteVisible = fileLengthBeforeOpen;
long totalByteWrittenButNotVisible = 0;
boolean toFlush;
for (int i = 0; i < loopN; i++) {
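        // Alternate between flushed and unflushed writes: only bytes that have
        // been hflush()ed are counted as guaranteed visible to the reader.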
toFlush = (i % 2) == 0;
writeData(out, outBuffer, chunkSize);
totalByteWritten += chunkSize;
if (toFlush) {
out.hflush();
totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
totalByteWrittenButNotVisible = 0;
} else {
totalByteWrittenButNotVisible += chunkSize;
}
if (verboseOption) {
LOG.info("TestReadWrite - Written " + chunkSize
+ ". Total written = " + totalByteWritten
+ ". TotalByteVisible = " + totalByteVisible + " to file "
+ fname);
}
byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);
String readmsg = "Written=" + totalByteWritten + " ; Expected Visible="
+ totalByteVisible + " ; Got Visible=" + byteVisibleToRead
+ " of file " + fname;
if (byteVisibleToRead >= totalByteVisible
&& byteVisibleToRead <= totalByteWritten) {
readmsg = "pass: reader sees expected number of visible byte. "
+ readmsg + " [pass]";
} else {
countOfFailures++;
readmsg = "fail: reader see different number of visible byte. "
+ readmsg + " [fail]";
if (abortTestOnFailure) {
throw new IOException(readmsg);
}
}
LOG.info(readmsg);
}
// test the automatic flush after close
writeData(out, outBuffer, chunkSize);
totalByteWritten += chunkSize;
totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
      totalByteWrittenButNotVisible = 0; // close() flushes everything, so nothing remains invisible
out.close();
byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);
String readmsg2 = "Written=" + totalByteWritten + " ; Expected Visible="
+ totalByteVisible + " ; Got Visible=" + byteVisibleToRead
+ " of file " + fname;
String readmsg;
if (byteVisibleToRead >= totalByteVisible
&& byteVisibleToRead <= totalByteWritten) {
readmsg = "pass: reader sees expected number of visible byte on close. "
+ readmsg2 + " [pass]";
} else {
countOfFailures++;
readmsg = "fail: reader sees different number of visible byte on close. "
+ readmsg2 + " [fail]";
LOG.info(readmsg);
if (abortTestOnFailure)
throw new IOException(readmsg);
}
// now check if NN got the same length
long lenFromFc = getFileLengthFromNN(path);
      if (lenFromFc != byteVisibleToRead) {
        readmsg = "fail: reader sees a different number of visible bytes than the NN reports. "
            + readmsg2 + " [fail]";
throw new IOException(readmsg);
}
} catch (IOException e) {
throw new IOException(
"##### Caught Exception in testAppendWriteAndRead. Close file. "
+ "Total Byte Read so far = " + byteVisibleToRead, e);
} finally {
if (out != null)
out.close();
}
return -countOfFailures;
}
// //////////////////////////////////////////////////////////////////////
// // helper function:
// /////////////////////////////////////////////////////////////////////
private FSDataInputStream openInputStream(Path path) throws IOException {
FSDataInputStream in = useFCOption ? mfc.open(path) : mfs.open(path);
return in;
}
  // Returns the length of a file (given by path name) as reported by the NameNode.
private long getFileLengthFromNN(Path path) throws IOException {
FileStatus fileStatus = useFCOption ? mfc.getFileStatus(path) :
mfs.getFileStatus(path);
return fileStatus.getLen();
}
private boolean ifExists(Path path) throws IOException {
return useFCOption ? mfc.util().exists(path) : mfs.exists(path);
}
private Path getFullyQualifiedPath(String pathString) {
return useFCOption ? mfc.makeQualified(new Path(ROOT_DIR, pathString))
: mfs.makeQualified(new Path(ROOT_DIR, pathString));
}
private void usage() {
LOG.info("Usage: [-useSeqRead | -usePosRead] [-append|truncate]"
+ " -chunkSize nn -loop ntimes -f filename");
System.out.println("Usage: [-useSeqRead | -usePosRead] [-append|truncate]"
+ " -chunkSize nn -loop ntimes -f filename");
System.out.println("Defaults: -chunkSize=10000, -loop=10, -f=/tmp/fileX1, "
+ "use sequential read, use append mode if file already exists");
System.exit(0);
}
private void dumpOptions() {
LOG.info(" Option setting: filenameOption = " + filenameOption);
LOG.info(" Option setting: chunkSizeOption = " + chunkSizeOption);
LOG.info(" Option setting: loopOption = " + loopOption);
LOG.info(" Option setting: posReadOption = " + positionReadOption);
LOG.info(" Option setting: truncateOption = " + truncateOption);
LOG.info(" Option setting: verboseOption = " + verboseOption);
}
private void getCmdLineOption(String[] args) {
for (int i = 0; i < args.length; i++) {
if (args[i].equals("-f")) {
filenameOption = args[++i];
} else if (args[i].equals("-chunkSize")) {
chunkSizeOption = Integer.parseInt(args[++i]);
} else if (args[i].equals("-loop")) {
loopOption = Integer.parseInt(args[++i]);
} else if (args[i].equals("-usePosRead")) {
positionReadOption = true;
} else if (args[i].equals("-useSeqRead")) {
positionReadOption = false;
} else if (args[i].equals("-truncate")) {
truncateOption = true;
} else if (args[i].equals("-append")) {
truncateOption = false;
} else if (args[i].equals("-verbose")) {
verboseOption = true;
} else if (args[i].equals("-noVerbose")) {
verboseOption = false;
} else {
usage();
}
}
if (verboseOption)
dumpOptions();
return;
}
/**
* Entry point of the test when using a real cluster.
* Usage: [-loop ntimes] [-chunkSize nn] [-f filename]
* [-useSeqRead |-usePosRead] [-append |-truncate] [-verbose |-noVerbose]
* -loop: iterate ntimes: each iteration consists of a write, then a read
* -chunkSize: number of byte for each write
* -f filename: filename to write and read
* [-useSeqRead | -usePosRead]: use Position Read, or default Sequential Read
* [-append | -truncate]: if file already exist, Truncate or default Append
* [-verbose | -noVerbose]: additional debugging messages if verbose is on
* Default: -loop = 10; -chunkSize = 10000; -f filename = /tmp/fileX1
* Use Sequential Read, Append Mode, verbose on.
*/
public static void main(String[] args) {
try {
TestWriteRead trw = new TestWriteRead();
trw.initClusterModeTest();
trw.getCmdLineOption(args);
int stat = trw.clusterTestWriteRead1();
if (stat == 0) {
System.out.println("Status: clusterTestWriteRead1 test PASS");
} else {
System.out.println("Status: clusterTestWriteRead1 test FAIL with "
+ stat + " failures");
}
System.exit(stat);
} catch (IOException e) {
LOG.info("#### Exception in Main");
e.printStackTrace();
System.exit(-2);
}
}
}
| 18,238 | 34.484436 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgradeRollback.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.File;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.junit.Assert;
import org.junit.Test;
/**
* This class tests rollback for rolling upgrade.
*/
public class TestRollingUpgradeRollback {
private static final int NUM_JOURNAL_NODES = 3;
private static final String JOURNAL_ID = "myjournal";
private static boolean fileExists(List<File> files) {
for (File file : files) {
if (file.exists()) {
return true;
}
}
return false;
}
private void checkNNStorage(NNStorage storage, long imageTxId,
long trashEndTxId) {
List<File> finalizedEdits = storage.getFiles(
NNStorage.NameNodeDirType.EDITS,
NNStorage.getFinalizedEditsFileName(1, imageTxId));
Assert.assertTrue(fileExists(finalizedEdits));
List<File> inprogressEdits = storage.getFiles(
NNStorage.NameNodeDirType.EDITS,
NNStorage.getInProgressEditsFileName(imageTxId + 1));
// For rollback case we will have an inprogress file for future transactions
Assert.assertTrue(fileExists(inprogressEdits));
if (trashEndTxId > 0) {
List<File> trashedEdits = storage.getFiles(
NNStorage.NameNodeDirType.EDITS,
NNStorage.getFinalizedEditsFileName(imageTxId + 1, trashEndTxId)
+ ".trash");
Assert.assertTrue(fileExists(trashedEdits));
}
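    // With trashEndTxId > 0 we are checking the post-rollback state, where a
    // regular fsimage is expected; otherwise the rolling upgrade is still in
    // progress and the rollback fsimage is expected instead.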
String imageFileName = trashEndTxId > 0 ? NNStorage
.getImageFileName(imageTxId) : NNStorage
.getRollbackImageFileName(imageTxId);
List<File> imageFiles = storage.getFiles(
NNStorage.NameNodeDirType.IMAGE, imageFileName);
Assert.assertTrue(fileExists(imageFiles));
}
private void checkJNStorage(File dir, long discardStartTxId,
long discardEndTxId) {
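    // After rollback, edits before the discarded range stay finalized, while
    // the discarded segment is renamed with a .trash suffix rather than deleted.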
File finalizedEdits = new File(dir, NNStorage.getFinalizedEditsFileName(1,
discardStartTxId - 1));
Assert.assertTrue(finalizedEdits.exists());
File trashEdits = new File(dir, NNStorage.getFinalizedEditsFileName(
discardStartTxId, discardEndTxId) + ".trash");
Assert.assertTrue(trashEdits.exists());
}
@Test
public void testRollbackCommand() throws Exception {
final Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
final Path foo = new Path("/foo");
final Path bar = new Path("/bar");
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final DFSAdmin dfsadmin = new DFSAdmin(conf);
dfs.mkdirs(foo);
// start rolling upgrade
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
Assert.assertEquals(0,
dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
// create new directory
dfs.mkdirs(bar);
// check NNStorage
NNStorage storage = cluster.getNamesystem().getFSImage().getStorage();
checkNNStorage(storage, 3, -1); // (startSegment, mkdir, endSegment)
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
NameNode nn = null;
try {
nn = NameNode.createNameNode(new String[] { "-rollingUpgrade",
"rollback" }, conf);
// make sure /foo is still there, but /bar is not
INode fooNode = nn.getNamesystem().getFSDirectory()
.getINode4Write(foo.toString());
Assert.assertNotNull(fooNode);
INode barNode = nn.getNamesystem().getFSDirectory()
.getINode4Write(bar.toString());
Assert.assertNull(barNode);
// check the details of NNStorage
NNStorage storage = nn.getNamesystem().getFSImage().getStorage();
// (startSegment, upgrade marker, mkdir, endSegment)
checkNNStorage(storage, 3, 7);
} finally {
if (nn != null) {
nn.stop();
nn.join();
}
}
}
@Test
public void testRollbackWithQJM() throws Exception {
final Configuration conf = new HdfsConfiguration();
MiniJournalCluster mjc = null;
MiniDFSCluster cluster = null;
final Path foo = new Path("/foo");
final Path bar = new Path("/bar");
try {
mjc = new MiniJournalCluster.Builder(conf).numJournalNodes(
NUM_JOURNAL_NODES).build();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, mjc
.getQuorumJournalURI(JOURNAL_ID).toString());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
DistributedFileSystem dfs = cluster.getFileSystem();
final DFSAdmin dfsadmin = new DFSAdmin(conf);
dfs.mkdirs(foo);
// start rolling upgrade
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
Assert.assertEquals(0,
dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
// create new directory
dfs.mkdirs(bar);
dfs.close();
// rollback
cluster.restartNameNode("-rollingUpgrade", "rollback");
// make sure /foo is still there, but /bar is not
dfs = cluster.getFileSystem();
Assert.assertTrue(dfs.exists(foo));
Assert.assertFalse(dfs.exists(bar));
// check storage in JNs
      for (int i = 0; i < NUM_JOURNAL_NODES; i++) {
        File dir = mjc.getCurrentDir(i, JOURNAL_ID);
// segments:(startSegment, mkdir, endSegment), (startSegment, upgrade
// marker, mkdir, endSegment)
checkJNStorage(dir, 4, 7);
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
if (mjc != null) {
mjc.shutdown();
}
}
}
/**
* Test rollback scenarios where StandbyNameNode does checkpoints during
* rolling upgrade.
*/
@Test
public void testRollbackWithHAQJM() throws Exception {
final Configuration conf = new HdfsConfiguration();
MiniQJMHACluster cluster = null;
final Path foo = new Path("/foo");
final Path bar = new Path("/bar");
try {
cluster = new MiniQJMHACluster.Builder(conf).build();
MiniDFSCluster dfsCluster = cluster.getDfsCluster();
dfsCluster.waitActive();
// let NN1 tail editlog every 1s
dfsCluster.getConfiguration(1).setInt(
DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
dfsCluster.restartNameNode(1);
dfsCluster.transitionToActive(0);
DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
dfs.mkdirs(foo);
// start rolling upgrade
RollingUpgradeInfo info = dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
Assert.assertTrue(info.isStarted());
// create new directory
dfs.mkdirs(bar);
dfs.close();
TestRollingUpgrade.queryForPreparation(dfs);
// If the query returns true, both active and the standby NN should have
// rollback fsimage ready.
Assert.assertTrue(dfsCluster.getNameNode(0).getFSImage()
.hasRollbackFSImage());
Assert.assertTrue(dfsCluster.getNameNode(1).getFSImage()
.hasRollbackFSImage());
// rollback NN0
dfsCluster.restartNameNode(0, true, "-rollingUpgrade",
"rollback");
// shutdown NN1
dfsCluster.shutdownNameNode(1);
dfsCluster.transitionToActive(0);
// make sure /foo is still there, but /bar is not
dfs = dfsCluster.getFileSystem(0);
Assert.assertTrue(dfs.exists(foo));
Assert.assertFalse(dfs.exists(bar));
// check the details of NNStorage
NNStorage storage = dfsCluster.getNamesystem(0).getFSImage()
.getStorage();
// segments:(startSegment, mkdir, start upgrade endSegment),
// (startSegment, mkdir, endSegment)
checkNNStorage(storage, 4, 7);
// check storage in JNs
      for (int i = 0; i < NUM_JOURNAL_NODES; i++) {
        File dir = cluster.getJournalCluster().getCurrentDir(i,
            MiniQJMHACluster.NAMESERVICE);
checkJNStorage(dir, 5, 7);
}
// restart NN0 again to make sure we can start using the new fsimage and
// the corresponding md5 checksum
dfsCluster.restartNameNode(0);
// start the rolling upgrade again to make sure we do not load upgrade
// status after the rollback
dfsCluster.transitionToActive(0);
dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
  // TODO: rollback may not succeed on all JNs
}
| 9,902 | 34.241993 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.fs.CreateFlag;
import org.mockito.invocation.InvocationOnMock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import org.mockito.stubbing.Answer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
/** This class implements some of the tests posted in HADOOP-2658. */
public class TestFileAppend3 {
{
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
GenericTestUtils.setLogLevel(InterDatanodeProtocol.LOG, Level.ALL);
}
static final long BLOCK_SIZE = 64 * 1024;
static final short REPLICATION = 3;
static final int DATANODE_NUM = 5;
private static Configuration conf;
private static int buffersize;
private static MiniDFSCluster cluster;
private static DistributedFileSystem fs;
@BeforeClass
public static void setUp() throws java.lang.Exception {
AppendTestUtil.LOG.info("setUp()");
conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
fs = cluster.getFileSystem();
}
@AfterClass
public static void tearDown() throws Exception {
AppendTestUtil.LOG.info("tearDown()");
if(fs != null) fs.close();
if(cluster != null) cluster.shutdown();
}
/**
* TC1: Append on block boundary.
* @throws IOException an exception might be thrown
*/
@Test
public void testTC1() throws Exception {
final Path p = new Path("/TC1/foo");
System.out.println("p=" + p);
//a. Create file and write one block of data. Close file.
final int len1 = (int)BLOCK_SIZE;
{
FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
AppendTestUtil.write(out, 0, len1);
out.close();
}
// Reopen file to append. Append half block of data. Close file.
final int len2 = (int)BLOCK_SIZE/2;
{
FSDataOutputStream out = fs.append(p);
AppendTestUtil.write(out, len1, len2);
out.close();
}
//b. Reopen file and read 1.5 blocks worth of data. Close file.
AppendTestUtil.check(fs, p, len1 + len2);
}
@Test
public void testTC1ForAppend2() throws Exception {
final Path p = new Path("/TC1/foo2");
//a. Create file and write one block of data. Close file.
final int len1 = (int) BLOCK_SIZE;
{
FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION,
BLOCK_SIZE);
AppendTestUtil.write(out, 0, len1);
out.close();
}
// Reopen file to append. Append half block of data. Close file.
final int len2 = (int) BLOCK_SIZE / 2;
{
FSDataOutputStream out = fs.append(p,
EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
AppendTestUtil.write(out, len1, len2);
out.close();
}
// b. Reopen file and read 1.5 blocks worth of data. Close file.
AppendTestUtil.check(fs, p, len1 + len2);
}
/**
* TC2: Append on non-block boundary.
* @throws IOException an exception might be thrown
*/
@Test
public void testTC2() throws Exception {
final Path p = new Path("/TC2/foo");
System.out.println("p=" + p);
//a. Create file with one and a half block of data. Close file.
final int len1 = (int)(BLOCK_SIZE + BLOCK_SIZE/2);
{
FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
AppendTestUtil.write(out, 0, len1);
out.close();
}
AppendTestUtil.check(fs, p, len1);
// Reopen file to append quarter block of data. Close file.
final int len2 = (int)BLOCK_SIZE/4;
{
FSDataOutputStream out = fs.append(p);
AppendTestUtil.write(out, len1, len2);
out.close();
}
//b. Reopen file and read 1.75 blocks of data. Close file.
AppendTestUtil.check(fs, p, len1 + len2);
}
@Test
public void testTC2ForAppend2() throws Exception {
final Path p = new Path("/TC2/foo2");
//a. Create file with one and a half block of data. Close file.
final int len1 = (int) (BLOCK_SIZE + BLOCK_SIZE / 2);
{
FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION,
BLOCK_SIZE);
AppendTestUtil.write(out, 0, len1);
out.close();
}
AppendTestUtil.check(fs, p, len1);
// Reopen file to append quarter block of data. Close file.
final int len2 = (int) BLOCK_SIZE / 4;
{
FSDataOutputStream out = fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK),
4096, null);
AppendTestUtil.write(out, len1, len2);
out.close();
}
// b. Reopen file and read 1.75 blocks of data. Close file.
AppendTestUtil.check(fs, p, len1 + len2);
List<LocatedBlock> blocks = fs.getClient().getLocatedBlocks(
p.toString(), 0L).getLocatedBlocks();
Assert.assertEquals(3, blocks.size());
Assert.assertEquals(BLOCK_SIZE, blocks.get(0).getBlockSize());
Assert.assertEquals(BLOCK_SIZE / 2, blocks.get(1).getBlockSize());
Assert.assertEquals(BLOCK_SIZE / 4, blocks.get(2).getBlockSize());
}
/**
* TC5: Only one simultaneous append.
* @throws IOException an exception might be thrown
*/
@Test
public void testTC5() throws Exception {
final Path p = new Path("/TC5/foo");
System.out.println("p=" + p);
//a. Create file on Machine M1. Write half block to it. Close file.
{
FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
AppendTestUtil.write(out, 0, (int)(BLOCK_SIZE/2));
out.close();
}
//b. Reopen file in "append" mode on Machine M1.
FSDataOutputStream out = fs.append(p);
//c. On Machine M2, reopen file in "append" mode. This should fail.
try {
AppendTestUtil.createHdfsWithDifferentUsername(conf).append(p);
fail("This should fail.");
} catch(IOException ioe) {
AppendTestUtil.LOG.info("GOOD: got an exception", ioe);
}
try {
((DistributedFileSystem) AppendTestUtil
.createHdfsWithDifferentUsername(conf)).append(p,
EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
fail("This should fail.");
} catch(IOException ioe) {
AppendTestUtil.LOG.info("GOOD: got an exception", ioe);
}
//d. On Machine M1, close file.
out.close();
}
@Test
public void testTC5ForAppend2() throws Exception {
final Path p = new Path("/TC5/foo2");
// a. Create file on Machine M1. Write half block to it. Close file.
{
FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION,
BLOCK_SIZE);
AppendTestUtil.write(out, 0, (int)(BLOCK_SIZE/2));
out.close();
}
// b. Reopen file in "append" mode on Machine M1.
FSDataOutputStream out = fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK),
4096, null);
// c. On Machine M2, reopen file in "append" mode. This should fail.
try {
((DistributedFileSystem) AppendTestUtil
.createHdfsWithDifferentUsername(conf)).append(p,
EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
fail("This should fail.");
} catch(IOException ioe) {
AppendTestUtil.LOG.info("GOOD: got an exception", ioe);
}
try {
AppendTestUtil.createHdfsWithDifferentUsername(conf).append(p);
fail("This should fail.");
} catch(IOException ioe) {
AppendTestUtil.LOG.info("GOOD: got an exception", ioe);
}
// d. On Machine M1, close file.
out.close();
}
/**
* TC7: Corrupted replicas are present.
* @throws IOException an exception might be thrown
*/
private void testTC7(boolean appendToNewBlock) throws Exception {
final short repl = 2;
final Path p = new Path("/TC7/foo" + (appendToNewBlock ? "0" : "1"));
System.out.println("p=" + p);
//a. Create file with replication factor of 2. Write half block of data. Close file.
final int len1 = (int)(BLOCK_SIZE/2);
{
FSDataOutputStream out = fs.create(p, false, buffersize, repl, BLOCK_SIZE);
AppendTestUtil.write(out, 0, len1);
out.close();
}
DFSTestUtil.waitReplication(fs, p, repl);
//b. Log into one datanode that has one replica of this block.
// Find the block file on this datanode and truncate it to zero size.
final LocatedBlocks locatedblocks = fs.dfs.getNamenode().getBlockLocations(p.toString(), 0L, len1);
assertEquals(1, locatedblocks.locatedBlockCount());
final LocatedBlock lb = locatedblocks.get(0);
final ExtendedBlock blk = lb.getBlock();
assertEquals(len1, lb.getBlockSize());
DatanodeInfo[] datanodeinfos = lb.getLocations();
assertEquals(repl, datanodeinfos.length);
final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
final File f = DataNodeTestUtils.getBlockFile(
dn, blk.getBlockPoolId(), blk.getLocalBlock());
final RandomAccessFile raf = new RandomAccessFile(f, "rw");
AppendTestUtil.LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
assertEquals(len1, raf.length());
raf.setLength(0);
raf.close();
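    // The replica on this datanode is now truncated to zero length; append and
    // the subsequent read should still succeed via the remaining good replica.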
//c. Open file in "append mode". Append a new block worth of data. Close file.
final int len2 = (int)BLOCK_SIZE;
{
FSDataOutputStream out = appendToNewBlock ?
fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null) : fs.append(p);
AppendTestUtil.write(out, len1, len2);
out.close();
}
//d. Reopen file and read two blocks worth of data.
AppendTestUtil.check(fs, p, len1 + len2);
}
@Test
public void testTC7() throws Exception {
testTC7(false);
}
@Test
public void testTC7ForAppend2() throws Exception {
testTC7(true);
}
/**
* TC11: Racing rename
*/
private void testTC11(boolean appendToNewBlock) throws Exception {
final Path p = new Path("/TC11/foo" + (appendToNewBlock ? "0" : "1"));
System.out.println("p=" + p);
//a. Create file and write one block of data. Close file.
final int len1 = (int)BLOCK_SIZE;
{
FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
AppendTestUtil.write(out, 0, len1);
out.close();
}
//b. Reopen file in "append" mode. Append half block of data.
FSDataOutputStream out = appendToNewBlock ?
fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null) :
fs.append(p);
final int len2 = (int)BLOCK_SIZE/2;
AppendTestUtil.write(out, len1, len2);
out.hflush();
//c. Rename file to file.new.
final Path pnew = new Path(p + ".new");
assertTrue(fs.rename(p, pnew));
//d. Close file handle that was opened in (b).
out.close();
//check block sizes
final long len = fs.getFileStatus(pnew).getLen();
final LocatedBlocks locatedblocks = fs.dfs.getNamenode().getBlockLocations(pnew.toString(), 0L, len);
final int numblock = locatedblocks.locatedBlockCount();
for(int i = 0; i < numblock; i++) {
final LocatedBlock lb = locatedblocks.get(i);
final ExtendedBlock blk = lb.getBlock();
final long size = lb.getBlockSize();
if (i < numblock - 1) {
assertEquals(BLOCK_SIZE, size);
}
for(DatanodeInfo datanodeinfo : lb.getLocations()) {
final DataNode dn = cluster.getDataNode(datanodeinfo.getIpcPort());
final Block metainfo = DataNodeTestUtils.getFSDataset(dn).getStoredBlock(
blk.getBlockPoolId(), blk.getBlockId());
assertEquals(size, metainfo.getNumBytes());
}
}
}
@Test
public void testTC11() throws Exception {
testTC11(false);
}
@Test
public void testTC11ForAppend2() throws Exception {
testTC11(true);
}
/**
* TC12: Append to partial CRC chunk
*/
private void testTC12(boolean appendToNewBlock) throws Exception {
final Path p = new Path("/TC12/foo" + (appendToNewBlock ? "0" : "1"));
System.out.println("p=" + p);
//a. Create file with a block size of 64KB
// and a default io.bytes.per.checksum of 512 bytes.
// Write 25687 bytes of data. Close file.
final int len1 = 25687;
{
FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
AppendTestUtil.write(out, 0, len1);
out.close();
}
//b. Reopen file in "append" mode. Append another 5877 bytes of data. Close file.
final int len2 = 5877;
{
FSDataOutputStream out = appendToNewBlock ?
fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null) :
fs.append(p);
AppendTestUtil.write(out, len1, len2);
out.close();
}
//c. Reopen file and read 25687+5877 bytes of data from file. Close file.
AppendTestUtil.check(fs, p, len1 + len2);
if (appendToNewBlock) {
LocatedBlocks blks = fs.dfs.getLocatedBlocks(p.toString(), 0);
Assert.assertEquals(2, blks.getLocatedBlocks().size());
Assert.assertEquals(len1, blks.getLocatedBlocks().get(0).getBlockSize());
Assert.assertEquals(len2, blks.getLocatedBlocks().get(1).getBlockSize());
AppendTestUtil.check(fs, p, 0, len1);
AppendTestUtil.check(fs, p, len1, len2);
}
}
@Test
public void testTC12() throws Exception {
testTC12(false);
}
@Test
public void testTC12ForAppend2() throws Exception {
testTC12(true);
}
/**
   * Append to a partial CRC chunk, where the first write does not fill up the
   * partial CRC chunk.
*/
private void testAppendToPartialChunk(boolean appendToNewBlock)
throws IOException {
final Path p = new Path("/partialChunk/foo"
+ (appendToNewBlock ? "0" : "1"));
final int fileLen = 513;
System.out.println("p=" + p);
byte[] fileContents = AppendTestUtil.initBuffer(fileLen);
// create a new file.
FSDataOutputStream stm = AppendTestUtil.createFile(fs, p, 1);
// create 1 byte file
stm.write(fileContents, 0, 1);
stm.close();
System.out.println("Wrote 1 byte and closed the file " + p);
// append to file
stm = appendToNewBlock ?
fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null) :
fs.append(p);
    // Append to a partial CRC chunk
stm.write(fileContents, 1, 1);
stm.hflush();
    // The partial CRC chunk is not full yet; close the file
stm.close();
System.out.println("Append 1 byte and closed the file " + p);
// write the remainder of the file
stm = appendToNewBlock ?
fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null) :
fs.append(p);
// ensure getPos is set to reflect existing size of the file
assertEquals(2, stm.getPos());
    // append to a partial CRC chunk
stm.write(fileContents, 2, 1);
// The partial chunk is not full yet, force to send a packet to DN
stm.hflush();
System.out.println("Append and flush 1 byte");
// The partial chunk is not full yet, force to send another packet to DN
stm.write(fileContents, 3, 2);
stm.hflush();
System.out.println("Append and flush 2 byte");
// fill up the partial chunk and close the file
stm.write(fileContents, 5, fileLen-5);
stm.close();
System.out.println("Flush 508 byte and closed the file " + p);
// verify that entire file is good
AppendTestUtil.checkFullFile(fs, p, fileLen,
fileContents, "Failed to append to a partial chunk");
}
// Do small appends.
void doSmallAppends(Path file, DistributedFileSystem fs, int iterations)
throws IOException {
for (int i = 0; i < iterations; i++) {
FSDataOutputStream stm;
try {
stm = fs.append(file);
} catch (IOException e) {
// If another thread is already appending, skip this time.
continue;
}
// Failure in write or close will be terminal.
AppendTestUtil.write(stm, 0, 123);
stm.close();
}
}
@Test
public void testSmallAppendRace() throws Exception {
final Path file = new Path("/testSmallAppendRace");
final String fName = file.toUri().getPath();
// Create the file and write a small amount of data.
FSDataOutputStream stm = fs.create(file);
AppendTestUtil.write(stm, 0, 123);
stm.close();
// Introduce a delay between getFileInfo and calling append() against NN.
final DFSClient client = DFSClientAdapter.getDFSClient(fs);
DFSClient spyClient = spy(client);
when(spyClient.getFileInfo(fName)).thenAnswer(new Answer<HdfsFileStatus>() {
@Override
public HdfsFileStatus answer(InvocationOnMock invocation){
try {
HdfsFileStatus stat = client.getFileInfo(fName);
Thread.sleep(100);
return stat;
} catch (Exception e) {
return null;
}
}
});
DFSClientAdapter.setDFSClient(fs, spyClient);
// Create two threads for doing appends to the same file.
Thread worker1 = new Thread() {
@Override
public void run() {
try {
doSmallAppends(file, fs, 20);
} catch (IOException e) {
}
}
};
Thread worker2 = new Thread() {
@Override
public void run() {
try {
doSmallAppends(file, fs, 20);
} catch (IOException e) {
}
}
};
worker1.start();
worker2.start();
// append will fail when the file size crosses the checksum chunk boundary,
// if append was called with a stale file stat.
doSmallAppends(file, fs, 20);
}
@Test
public void testAppendToPartialChunk() throws IOException {
testAppendToPartialChunk(false);
}
@Test
public void testAppendToPartialChunkforAppend2() throws IOException {
testAppendToPartialChunk(true);
}
}
| 19,928 | 32.104651 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;
import java.io.IOException;
import java.io.OutputStream;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Before;
import org.junit.Test;
/* File Append tests for HDFS-200 & HDFS-142, specifically focused on:
* using append()/sync() to recover block information
*/
public class TestFileAppend4 {
static final Log LOG = LogFactory.getLog(TestFileAppend4.class);
static final long BLOCK_SIZE = 1024;
static final long BBW_SIZE = 500; // don't align on bytes/checksum
static final Object [] NO_ARGS = new Object []{};
Configuration conf;
MiniDFSCluster cluster;
Path file1;
FSDataOutputStream stm;
final boolean simulatedStorage = false;
{
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
}
@Before
public void setUp() throws Exception {
this.conf = new Configuration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
// lower heartbeat interval for fast recognition of DN death
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
// handle under-replicated blocks quickly (for replication asserts)
conf.setInt(
DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 5);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
// handle failures in the DFSClient pipeline quickly
// (for cluster.shutdown(); fs.close() idiom)
conf.setInt("ipc.client.connect.max.retries", 1);
}
/*
* Recover file.
   * Try to open the file in append mode. Doing this, we get hold of the file
   * that the crashed writer was writing to. Once we have it, close it. This
   * allows a subsequent reader to see data up to the last sync.
* NOTE: This is the same algorithm that HBase uses for file recovery
* @param fs
* @throws Exception
*/
private void recoverFile(final FileSystem fs) throws Exception {
LOG.info("Recovering File Lease");
// set the soft limit to be 1 second so that the
// namenode triggers lease recovery upon append request
cluster.setLeasePeriod(1000, HdfsServerConstants.LEASE_HARDLIMIT_PERIOD);
// Trying recovery
int tries = 60;
boolean recovered = false;
FSDataOutputStream out = null;
while (!recovered && tries-- > 0) {
try {
out = fs.append(file1);
LOG.info("Successfully opened for append");
recovered = true;
} catch (IOException e) {
LOG.info("Failed open for append, waiting on lease recovery");
try {
Thread.sleep(1000);
} catch (InterruptedException ex) {
// ignore it and try again
}
}
}
if (out != null) {
out.close();
}
if (!recovered) {
fail("Recovery should take < 1 min");
}
LOG.info("Past out lease recovery");
}
/**
* Test case that stops a writer after finalizing a block but
* before calling completeFile, and then tries to recover
* the lease from another thread.
*/
@Test(timeout=60000)
public void testRecoverFinalizedBlock() throws Throwable {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
try {
cluster.waitActive();
NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
NamenodeProtocols spyNN = spy(preSpyNN);
// Delay completeFile
GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
doAnswer(delayer).when(spyNN).complete(
anyString(), anyString(), (ExtendedBlock)anyObject(), anyLong());
DFSClient client = new DFSClient(null, spyNN, conf, null);
file1 = new Path("/testRecoverFinalized");
final OutputStream stm = client.create("/testRecoverFinalized", true);
// write 1/2 block
AppendTestUtil.write(stm, 0, 4096);
final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
Thread t = new Thread() {
@Override
public void run() {
try {
stm.close();
} catch (Throwable t) {
err.set(t);
}
}};
t.start();
LOG.info("Waiting for close to get to latch...");
delayer.waitForCall();
// At this point, the block is finalized on the DNs, but the file
// has not been completed in the NN.
// Lose the leases
LOG.info("Killing lease checker");
client.getLeaseRenewer().interruptAndJoin();
FileSystem fs1 = cluster.getFileSystem();
FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(
fs1.getConf());
LOG.info("Recovering file");
recoverFile(fs2);
LOG.info("Telling close to proceed.");
delayer.proceed();
LOG.info("Waiting for close to finish.");
t.join();
LOG.info("Close finished.");
// We expect that close will get a "File is not open"
// error.
Throwable thrownByClose = err.get();
assertNotNull(thrownByClose);
assertTrue(thrownByClose instanceof IOException);
if (!thrownByClose.getMessage().contains(
"No lease on /testRecoverFinalized"))
throw thrownByClose;
} finally {
cluster.shutdown();
}
}
/**
* Test case that stops a writer after finalizing a block but
* before calling completeFile, recovers a file from another writer,
* starts writing from that writer, and then has the old lease holder
* call completeFile
*/
@Test(timeout=60000)
public void testCompleteOtherLeaseHoldersFile() throws Throwable {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
try {
cluster.waitActive();
NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
NamenodeProtocols spyNN = spy(preSpyNN);
// Delay completeFile
GenericTestUtils.DelayAnswer delayer =
new GenericTestUtils.DelayAnswer(LOG);
doAnswer(delayer).when(spyNN).complete(anyString(), anyString(),
(ExtendedBlock) anyObject(), anyLong());
DFSClient client = new DFSClient(null, spyNN, conf, null);
file1 = new Path("/testCompleteOtherLease");
final OutputStream stm = client.create("/testCompleteOtherLease", true);
// write 1/2 block
AppendTestUtil.write(stm, 0, 4096);
final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
Thread t = new Thread() {
@Override
public void run() {
try {
stm.close();
} catch (Throwable t) {
err.set(t);
}
}};
t.start();
LOG.info("Waiting for close to get to latch...");
delayer.waitForCall();
// At this point, the block is finalized on the DNs, but the file
// has not been completed in the NN.
// Lose the leases
LOG.info("Killing lease checker");
client.getLeaseRenewer().interruptAndJoin();
FileSystem fs1 = cluster.getFileSystem();
FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(
fs1.getConf());
LOG.info("Recovering file");
recoverFile(fs2);
LOG.info("Opening file for append from new fs");
FSDataOutputStream appenderStream = fs2.append(file1);
LOG.info("Writing some data from new appender");
AppendTestUtil.write(appenderStream, 0, 4096);
LOG.info("Telling old close to proceed.");
delayer.proceed();
LOG.info("Waiting for close to finish.");
t.join();
LOG.info("Close finished.");
// We expect that close will get a "Lease mismatch"
// error.
Throwable thrownByClose = err.get();
assertNotNull(thrownByClose);
assertTrue(thrownByClose instanceof IOException);
if (!thrownByClose.getMessage().contains(
"Lease mismatch"))
throw thrownByClose;
// The appender should be able to close properly
appenderStream.close();
} finally {
cluster.shutdown();
}
}
/**
   * Test that neededReplications is updated for the appended block.
*/
@Test(timeout = 60000)
public void testUpdateNeededReplicationsForAppendedFile() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.build();
DistributedFileSystem fileSystem = null;
try {
// create a file.
fileSystem = cluster.getFileSystem();
Path f = new Path("/testAppend");
FSDataOutputStream create = fileSystem.create(f, (short) 2);
create.write("/testAppend".getBytes());
create.close();
// Append to the file.
FSDataOutputStream append = fileSystem.append(f);
append.write("/testAppend".getBytes());
append.close();
// Start a new datanode
cluster.startDataNodes(conf, 1, true, null, null);
// Check for replications
DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
} finally {
if (null != fileSystem) {
fileSystem.close();
}
cluster.shutdown();
}
}
/**
* Test that an append with no locations fails with an exception
* showing insufficient locations.
*/
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
Configuration conf = new Configuration();
    // lower heartbeat interval for fast recognition of DN death
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4)
.build();
DistributedFileSystem fileSystem = null;
try {
// create a file with replication 3
fileSystem = cluster.getFileSystem();
Path f = new Path("/testAppend");
FSDataOutputStream create = fileSystem.create(f, (short) 2);
create.write("/testAppend".getBytes());
create.close();
// Check for replications
DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
// Shut down all DNs that have the last block location for the file
LocatedBlocks lbs = fileSystem.dfs.getNamenode().
getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
List<DataNode> dnsOfCluster = cluster.getDataNodes();
DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().
getLocations();
for( DataNode dn : dnsOfCluster) {
for(DatanodeInfo loc: dnsWithLocations) {
if(dn.getDatanodeId().equals(loc)){
dn.shutdown();
DFSTestUtil.waitForDatanodeDeath(dn);
}
}
}
// Wait till 0 replication is recognized
DFSTestUtil.waitReplication(fileSystem, f, (short) 0);
// Append to the file, at this state there are 3 live DNs but none of them
// have the block.
try{
fileSystem.append(f);
fail("Append should fail because insufficient locations");
} catch (IOException e){
LOG.info("Expected exception: ", e);
}
FSDirectory dir = cluster.getNamesystem().getFSDirectory();
final INodeFile inode = INodeFile.
valueOf(dir.getINode("/testAppend"), "/testAppend");
assertTrue("File should remain closed", !inode.isUnderConstruction());
} finally {
if (null != fileSystem) {
fileSystem.close();
}
cluster.shutdown();
}
}
}
| 13,765 | 33.762626 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.List;
import java.util.Random;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.net.TcpPeerServer;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetCache;
import org.apache.hadoop.hdfs.server.namenode.CacheManager;
import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
/**
 * A helper class to set up the cluster and to get a BlockReader and DataNode for a block.
*/
public class BlockReaderTestUtil {
/**
* Returns true if we should run tests that generate large files (> 1GB)
*/
static public boolean shouldTestLargeFiles() {
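    // Property unset -> false; set but empty -> true; otherwise parse the value
    // as a boolean.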
String property = System.getProperty("hdfs.test.large.files");
if (property == null) return false;
if (property.isEmpty()) return true;
return Boolean.parseBoolean(property);
}
private HdfsConfiguration conf = null;
private MiniDFSCluster cluster = null;
/**
* Setup the cluster
*/
public BlockReaderTestUtil(int replicationFactor) throws Exception {
this(replicationFactor, new HdfsConfiguration());
}
public BlockReaderTestUtil(int replicationFactor, HdfsConfiguration config) throws Exception {
this.conf = config;
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replicationFactor);
cluster = new MiniDFSCluster.Builder(conf).format(true).build();
cluster.waitActive();
}
/**
* Shutdown cluster
*/
public void shutdown() {
if (cluster != null) {
cluster.shutdown();
}
}
public MiniDFSCluster getCluster() {
return cluster;
}
public HdfsConfiguration getConf() {
return conf;
}
/**
* Create a file of the given size filled with random data.
* @return File data.
*/
public byte[] writeFile(Path filepath, int sizeKB)
throws IOException {
FileSystem fs = cluster.getFileSystem();
// Write a file with the specified amount of data
DataOutputStream os = fs.create(filepath);
byte data[] = new byte[1024 * sizeKB];
new Random().nextBytes(data);
os.write(data);
os.close();
return data;
}
/**
* Get the list of Blocks for a file.
*/
public List<LocatedBlock> getFileBlocks(Path filepath, int sizeKB)
throws IOException {
// Return the blocks we just wrote
DFSClient dfsclient = getDFSClient();
return dfsclient.getNamenode().getBlockLocations(
filepath.toString(), 0, sizeKB * 1024).getLocatedBlocks();
}
/**
* Get the DFSClient.
*/
public DFSClient getDFSClient() throws IOException {
InetSocketAddress nnAddr = new InetSocketAddress("localhost", cluster.getNameNodePort());
return new DFSClient(nnAddr, conf);
}
/**
* Exercise the BlockReader and read length bytes.
*
* It does not verify the bytes read.
*/
public void readAndCheckEOS(BlockReader reader, int length, boolean expectEof)
throws IOException {
byte buf[] = new byte[1024];
int nRead = 0;
while (nRead < length) {
DFSClient.LOG.info("So far read " + nRead + " - going to read more.");
int n = reader.read(buf, 0, buf.length);
assertTrue(n > 0);
nRead += n;
}
if (expectEof) {
DFSClient.LOG.info("Done reading, expect EOF for next read.");
assertEquals(-1, reader.read(buf, 0, buf.length));
}
}
/**
* Get a BlockReader for the given block.
*/
public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int lenToRead)
throws IOException {
return getBlockReader(cluster, testBlock, offset, lenToRead);
}
/**
* Get a BlockReader for the given block.
*/
public static BlockReader getBlockReader(MiniDFSCluster cluster,
LocatedBlock testBlock, int offset, int lenToRead) throws IOException {
InetSocketAddress targetAddr = null;
ExtendedBlock block = testBlock.getBlock();
DatanodeInfo[] nodes = testBlock.getLocations();
targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
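    // Always connect to the first replica location reported for the block.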
final DistributedFileSystem fs = cluster.getFileSystem();
return new BlockReaderFactory(fs.getClient().getConf()).
setInetSocketAddress(targetAddr).
setBlock(block).
setFileName(targetAddr.toString()+ ":" + block.getBlockId()).
setBlockToken(testBlock.getBlockToken()).
setStartOffset(offset).
setLength(lenToRead).
setVerifyChecksum(true).
setClientName("BlockReaderTestUtil").
setDatanodeInfo(nodes[0]).
setClientCacheContext(ClientContext.getFromConf(fs.getConf())).
setCachingStrategy(CachingStrategy.newDefaultStrategy()).
setConfiguration(fs.getConf()).
setAllowShortCircuitLocalReads(true).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr,
Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
throws IOException {
Peer peer = null;
Socket sock = NetUtils.
getDefaultSocketFactory(fs.getConf()).createSocket();
try {
sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
peer = TcpPeerServer.peerFromSocket(sock);
} finally {
if (peer == null) {
IOUtils.closeQuietly(sock);
}
}
return peer;
}
}).
build();
}
/**
* Get a DataNode that serves our testBlock.
*/
public DataNode getDataNode(LocatedBlock testBlock) {
DatanodeInfo[] nodes = testBlock.getLocations();
int ipcport = nodes[0].getIpcPort();
return cluster.getDataNode(ipcport);
}
public static void enableHdfsCachingTracing() {
LogManager.getLogger(CacheReplicationMonitor.class.getName()).setLevel(
Level.TRACE);
LogManager.getLogger(CacheManager.class.getName()).setLevel(
Level.TRACE);
LogManager.getLogger(FsDatasetCache.class.getName()).setLevel(
Level.TRACE);
}
public static void enableBlockReaderFactoryTracing() {
LogManager.getLogger(BlockReaderFactory.class.getName()).setLevel(
Level.TRACE);
LogManager.getLogger(ShortCircuitCache.class.getName()).setLevel(
Level.TRACE);
LogManager.getLogger(ShortCircuitReplica.class.getName()).setLevel(
Level.TRACE);
LogManager.getLogger(BlockReaderLocal.class.getName()).setLevel(
Level.TRACE);
}
public static void enableShortCircuitShmTracing() {
LogManager.getLogger(DfsClientShmManager.class.getName()).setLevel(
Level.TRACE);
LogManager.getLogger(ShortCircuitRegistry.class.getName()).setLevel(
Level.TRACE);
LogManager.getLogger(ShortCircuitShm.class.getName()).setLevel(
Level.TRACE);
LogManager.getLogger(DataNode.class.getName()).setLevel(
Level.TRACE);
}
}
| 8,918 | 33.436293 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.hdfs.web.AuthFilter;
import org.junit.Assert;
import org.junit.Test;
public class TestDFSConfigKeys {
/**
* Make sure we keep the String literal up to date with what we'd get by calling
* class.getName.
*/
@Test
public void testStringLiteralDefaultWebFilter() {
Assert.assertEquals("The default webhdfs auth filter should make the FQCN of AuthFilter.",
AuthFilter.class.getName(), DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
}
}
| 1,342 | 34.342105 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.util.Random;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.io.DataOutputBuffer;
import org.junit.Assert;
import org.junit.Test;
public class TestDFSPacket {
private static final int chunkSize = 512;
private static final int checksumSize = 4;
private static final int maxChunksPerPacket = 4;
@Test
public void testPacket() throws Exception {
Random r = new Random(12345L);
byte[] data = new byte[chunkSize];
r.nextBytes(data);
byte[] checksum = new byte[checksumSize];
r.nextBytes(checksum);
DataOutputBuffer os = new DataOutputBuffer(data.length * 2);
byte[] packetBuf = new byte[data.length * 2];
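    // build a packet over packetBuf with room for up to maxChunksPerPacket data chunks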
DFSPacket p = new DFSPacket(packetBuf, maxChunksPerPacket,
0, 0, checksumSize, false);
p.setSyncBlock(true);
p.writeData(data, 0, data.length);
p.writeChecksum(checksum, 0, checksum.length);
p.writeTo(os);
//we have set syncBlock to true, so the header has the maximum length
int headerLen = PacketHeader.PKT_MAX_HEADER_LEN;
byte[] readBuf = os.getData();
assertArrayRegionsEqual(readBuf, headerLen, checksum, 0, checksum.length);
assertArrayRegionsEqual(readBuf, headerLen + checksum.length, data, 0, data.length);
}
public static void assertArrayRegionsEqual(byte []buf1, int off1, byte []buf2,
int off2, int len) {
for (int i = 0; i < len; i++) {
if (buf1[off1 + i] != buf2[off2 + i]) {
Assert.fail("arrays differ at byte " + i + ". " +
"The first array has " + (int) buf1[off1 + i] +
", but the second array has " + (int) buf2[off2 + i]);
}
}
}
@Test
public void testAddParentsGetParents() throws Exception {
DFSPacket p = new DFSPacket(null, maxChunksPerPacket,
0, 0, checksumSize, false);
long parents[] = p.getTraceParents();
Assert.assertEquals(0, parents.length);
p.addTraceParent(123);
p.addTraceParent(123);
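    // the duplicate parent ID should collapse into a single entry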
parents = p.getTraceParents();
Assert.assertEquals(1, parents.length);
Assert.assertEquals(123, parents[0]);
parents = p.getTraceParents(); // test calling 'get' again.
Assert.assertEquals(1, parents.length);
Assert.assertEquals(123, parents[0]);
p.addTraceParent(1);
p.addTraceParent(456);
p.addTraceParent(789);
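    // getTraceParents should return the IDs sorted and de-duplicated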
parents = p.getTraceParents();
Assert.assertEquals(4, parents.length);
Assert.assertEquals(1, parents[0]);
Assert.assertEquals(123, parents[1]);
Assert.assertEquals(456, parents[2]);
Assert.assertEquals(789, parents[3]);
}
}
| 3,495 | 36.191489 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadNoChecksum.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.hamcrest.CoreMatchers.equalTo;
import java.io.File;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.junit.AfterClass;
import org.junit.Assume;
import org.junit.Before;
import org.junit.BeforeClass;
public class TestParallelShortCircuitReadNoChecksum extends TestParallelReadUtil {
private static TemporarySocketDirectory sockDir;
@BeforeClass
static public void setupCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
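    // native domain sockets are unavailable: skip setup; before() will then skip every test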
DFSInputStream.tcpReadsDisabledForTesting = true;
sockDir = new TemporarySocketDirectory();
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
new File(sockDir.getDir(), "TestParallelLocalRead.%d.sock").getAbsolutePath());
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
true);
DomainSocket.disableBindPathValidation();
setupCluster(1, conf);
}
@Before
public void before() {
Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
}
@AfterClass
static public void teardownCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
sockDir.close();
TestParallelReadUtil.teardownCluster();
}
}
| 2,320 | 36.435484 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import java.util.List;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestClientBlockVerification {
static BlockReaderTestUtil util = null;
static final Path TEST_FILE = new Path("/test.file");
static final int FILE_SIZE_K = 256;
static LocatedBlock testBlock = null;
static {
((Log4JLogger)RemoteBlockReader2.LOG).getLogger().setLevel(Level.ALL);
}
@BeforeClass
public static void setupCluster() throws Exception {
final int REPLICATION_FACTOR = 1;
util = new BlockReaderTestUtil(REPLICATION_FACTOR);
util.writeFile(TEST_FILE, FILE_SIZE_K);
List<LocatedBlock> blkList = util.getFileBlocks(TEST_FILE, FILE_SIZE_K);
testBlock = blkList.get(0); // Use the first block to test
}
/**
* Verify that if we read an entire block, we send CHECKSUM_OK
*/
@Test
public void testBlockVerification() throws Exception {
RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024));
util.readAndCheckEOS(reader, FILE_SIZE_K * 1024, true);
verify(reader).sendReadResult(Status.CHECKSUM_OK);
reader.close();
}
/**
* Test that if we do an incomplete read, we don't call CHECKSUM_OK
*/
@Test
public void testIncompleteRead() throws Exception {
RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024));
util.readAndCheckEOS(reader, FILE_SIZE_K / 2 * 1024, false);
// We asked the blockreader for the whole file, and only read
// half of it, so no CHECKSUM_OK
verify(reader, never()).sendReadResult(Status.CHECKSUM_OK);
reader.close();
}
/**
* Test that if we ask for a half block, and read it all, we *do*
* send CHECKSUM_OK. The DN takes care of knowing whether it was
* the whole block or not.
*/
@Test
public void testCompletePartialRead() throws Exception {
// Ask for half the file
RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024 / 2));
// And read half the file
util.readAndCheckEOS(reader, FILE_SIZE_K * 1024 / 2, true);
verify(reader).sendReadResult(Status.CHECKSUM_OK);
reader.close();
}
/**
* Test various unaligned reads to make sure that we properly
* account even when we don't start or end on a checksum boundary
*/
@Test
public void testUnalignedReads() throws Exception {
int startOffsets[] = new int[] { 0, 3, 129 };
int lengths[] = new int[] { 30, 300, 512, 513, 1025 };
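    // cover reads whose start and end fall both on and off 512-byte checksum chunk boundaries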
for (int startOffset : startOffsets) {
for (int length : lengths) {
DFSClient.LOG.info("Testing startOffset = " + startOffset + " and " +
" len=" + length);
RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
util.getBlockReader(testBlock, startOffset, length));
util.readAndCheckEOS(reader, length, true);
verify(reader).sendReadResult(Status.CHECKSUM_OK);
reader.close();
}
}
}
@AfterClass
public static void teardownCluster() throws Exception {
util.shutdown();
}
}
| 4,377 | 33.746032 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
/**
* This class tests if FSOutputSummer works correctly.
*/
public class TestFSOutputSummer {
private static final long seed = 0xDEADBEEFL;
private static final int BYTES_PER_CHECKSUM = 10;
private static final int BLOCK_SIZE = 2*BYTES_PER_CHECKSUM;
private static final int HALF_CHUNK_SIZE = BYTES_PER_CHECKSUM/2;
private static final int FILE_SIZE = 2*BLOCK_SIZE-1;
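  // FILE_SIZE is one byte short of two full blocks, so the last checksum chunk is partial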
private static final short NUM_OF_DATANODES = 2;
private final byte[] expected = new byte[FILE_SIZE];
private final byte[] actual = new byte[FILE_SIZE];
private FileSystem fileSys;
/* create a file, write all data at once */
private void writeFile1(Path name) throws Exception {
FSDataOutputStream stm = fileSys.create(name, true,
fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, 4096),
NUM_OF_DATANODES, BLOCK_SIZE);
stm.write(expected);
stm.close();
checkFile(name);
cleanupFile(name);
}
/* create a file, write data chunk by chunk */
private void writeFile2(Path name) throws Exception {
FSDataOutputStream stm = fileSys.create(name, true,
fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, 4096),
NUM_OF_DATANODES, BLOCK_SIZE);
int i=0;
for( ;i<FILE_SIZE-BYTES_PER_CHECKSUM; i+=BYTES_PER_CHECKSUM) {
stm.write(expected, i, BYTES_PER_CHECKSUM);
}
stm.write(expected, i, FILE_SIZE-3*BYTES_PER_CHECKSUM);
stm.close();
checkFile(name);
cleanupFile(name);
}
  /* create a file, writing data in pieces of varying size */
private void writeFile3(Path name) throws Exception {
FSDataOutputStream stm = fileSys.create(name, true,
fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, 4096),
NUM_OF_DATANODES, BLOCK_SIZE);
stm.write(expected, 0, HALF_CHUNK_SIZE);
stm.write(expected, HALF_CHUNK_SIZE, BYTES_PER_CHECKSUM+2);
stm.write(expected, HALF_CHUNK_SIZE+BYTES_PER_CHECKSUM+2, 2);
stm.write(expected, HALF_CHUNK_SIZE+BYTES_PER_CHECKSUM+4, HALF_CHUNK_SIZE);
stm.write(expected, BLOCK_SIZE+4, BYTES_PER_CHECKSUM-4);
stm.write(expected, BLOCK_SIZE+BYTES_PER_CHECKSUM,
FILE_SIZE-3*BYTES_PER_CHECKSUM);
stm.close();
checkFile(name);
cleanupFile(name);
}
private void checkAndEraseData(byte[] actual, int from, byte[] expected,
String message) throws Exception {
for (int idx = 0; idx < actual.length; idx++) {
assertEquals(message+" byte "+(from+idx)+" differs. expected "+
expected[from+idx]+" actual "+actual[idx],
actual[idx], expected[from+idx]);
actual[idx] = 0;
}
}
private void checkFile(Path name) throws Exception {
FSDataInputStream stm = fileSys.open(name);
// do a sanity check. Read the file
stm.readFully(0, actual);
checkAndEraseData(actual, 0, expected, "Read Sanity Test");
stm.close();
// do a sanity check. Get the file checksum
fileSys.getFileChecksum(name);
}
private void cleanupFile(Path name) throws IOException {
assertTrue(fileSys.exists(name));
fileSys.delete(name, true);
assertTrue(!fileSys.exists(name));
}
/**
* Test write operation for output stream in DFS.
*/
@Test
public void testFSOutputSummer() throws Exception {
doTestFSOutputSummer("CRC32");
doTestFSOutputSummer("CRC32C");
doTestFSOutputSummer("NULL");
}
private void doTestFSOutputSummer(String checksumType) throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, checksumType);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_OF_DATANODES)
.build();
fileSys = cluster.getFileSystem();
try {
Path file = new Path("try.dat");
Random rand = new Random(seed);
rand.nextBytes(expected);
writeFile1(file);
writeFile2(file);
writeFile3(file);
} finally {
fileSys.close();
cluster.shutdown();
}
}
@Test
  public void testDFSCheckSumType() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_OF_DATANODES)
.build();
fileSys = cluster.getFileSystem();
try {
Path file = new Path("try.dat");
Random rand = new Random(seed);
rand.nextBytes(expected);
writeFile1(file);
} finally {
fileSys.close();
cluster.shutdown();
}
}
}
| 6,319 | 36.39645 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Test the MiniDFSCluster functionality that allows "dfs.datanode.address",
* "dfs.datanode.http.address", and "dfs.datanode.ipc.address" to be
* configurable. The MiniDFSCluster.startDataNodes() API now has a parameter
* that will check these properties if told to do so.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.junit.Test;
public class TestDFSAddressConfig {
@Test
public void testDFSAddressConfig() throws IOException {
Configuration conf = new HdfsConfiguration();
/*-------------------------------------------------------------------------
* By default, the DataNode socket address should be localhost (127.0.0.1).
*------------------------------------------------------------------------*/
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
ArrayList<DataNode> dns = cluster.getDataNodes();
DataNode dn = dns.get(0);
String selfSocketAddr = dn.getXferAddress().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
/*-------------------------------------------------------------------------
* Shut down the datanodes, reconfigure, and bring them back up.
* Even if told to use the configuration properties for dfs.datanode,
* MiniDFSCluster.startDataNodes() should use localhost as the default if
* the dfs.datanode properties are not set.
*------------------------------------------------------------------------*/
for (int i = 0; i < dns.size(); i++) {
DataNodeProperties dnp = cluster.stopDataNode(i);
assertNotNull("Should have been able to stop simulated datanode", dnp);
}
conf.unset(DFS_DATANODE_ADDRESS_KEY);
conf.unset(DFS_DATANODE_HTTP_ADDRESS_KEY);
conf.unset(DFS_DATANODE_IPC_ADDRESS_KEY);
cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
null, null, null, false, true);
dns = cluster.getDataNodes();
dn = dns.get(0);
selfSocketAddr = dn.getXferAddress().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
// assert that default self socket address is 127.0.0.1
assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
/*-------------------------------------------------------------------------
* Shut down the datanodes, reconfigure, and bring them back up.
* This time, modify the dfs.datanode properties and make sure that they
* are used to configure sockets by MiniDFSCluster.startDataNodes().
*------------------------------------------------------------------------*/
for (int i = 0; i < dns.size(); i++) {
DataNodeProperties dnp = cluster.stopDataNode(i);
assertNotNull("Should have been able to stop simulated datanode", dnp);
}
conf.set(DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
null, null, null, false, true);
dns = cluster.getDataNodes();
dn = dns.get(0);
selfSocketAddr = dn.getXferAddress().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
    // assert that the self socket address now reflects the configured 0.0.0.0
assertTrue(selfSocketAddr.contains("/0.0.0.0:"));
cluster.shutdown();
}
}
| 4,892 | 41.181034 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.hamcrest.CoreMatchers.equalTo;
import java.io.File;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.junit.Assume;
import org.junit.Test;
public class TestDFSInputStream {
private void testSkipInner(MiniDFSCluster cluster) throws IOException {
DistributedFileSystem fs = cluster.getFileSystem();
DFSClient client = fs.dfs;
Path file = new Path("/testfile");
int fileLength = 1 << 22;
byte[] fileContent = new byte[fileLength];
for (int i = 0; i < fileLength; i++) {
fileContent[i] = (byte) (i % 133);
}
FSDataOutputStream fout = fs.create(file);
fout.write(fileContent);
fout.close();
Random random = new Random();
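    // repeatedly skip a random number of bytes (up to 2^i at a time) and
    // verify that the byte read after each skip matches the expected pattern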
for (int i = 3; i < 18; i++) {
DFSInputStream fin = client.open("/testfile");
for (long pos = 0; pos < fileLength;) {
long skip = random.nextInt(1 << i) + 1;
long skipped = fin.skip(skip);
if (pos + skip >= fileLength) {
assertEquals(fileLength, pos + skipped);
break;
} else {
assertEquals(skip, skipped);
pos += skipped;
int data = fin.read();
assertEquals(pos % 133, data);
pos += 1;
}
}
fin.close();
}
}
@Test(timeout=60000)
public void testSkipWithRemoteBlockReader() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
testSkipInner(cluster);
} finally {
cluster.shutdown();
}
}
@Test(timeout=60000)
public void testSkipWithRemoteBlockReader2() throws IOException {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
testSkipInner(cluster);
} finally {
cluster.shutdown();
}
}
@Test(timeout=60000)
public void testSkipWithLocalBlockReader() throws IOException {
Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
DomainSocket.disableBindPathValidation();
Configuration conf = new Configuration();
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
new File(sockDir.getDir(),
"TestShortCircuitLocalRead._PORT.sock").getAbsolutePath());
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
DFSInputStream.tcpReadsDisabledForTesting = true;
testSkipInner(cluster);
} finally {
DFSInputStream.tcpReadsDisabledForTesting = false;
cluster.shutdown();
sockDir.close();
}
}
@Test(timeout=60000)
public void testSeekToNewSource() throws IOException {
Configuration conf = new Configuration();
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
DistributedFileSystem fs = cluster.getFileSystem();
Path path = new Path("/testfile");
DFSTestUtil.createFile(fs, path, 1024, (short) 3, 0);
DFSInputStream fin = fs.dfs.open("/testfile");
try {
fin.seekToNewSource(100);
assertEquals(100, fin.getPos());
DatanodeInfo firstNode = fin.getCurrentDatanode();
assertNotNull(firstNode);
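      // seeking to a new source again should switch the read to a different replica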
fin.seekToNewSource(100);
assertEquals(100, fin.getPos());
assertFalse(firstNode.equals(fin.getCurrentDatanode()));
} finally {
fin.close();
cluster.shutdown();
}
}
}
| 4,918 | 34.135714 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.spy;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
import org.apache.log4j.Level;
import org.junit.Test;
import org.mockito.Mockito;
public class TestRenameWhileOpen {
{
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}
//TODO: un-comment checkFullFile once the lease recovery is done
private static void checkFullFile(FileSystem fs, Path p) throws IOException {
//TestFileCreation.checkFullFile(fs, p);
}
/**
* open /user/dir1/file1 /user/dir2/file2
* mkdir /user/dir3
* move /user/dir1 /user/dir3
*/
@Test
public void testWhileOpenRenameParent() throws IOException {
Configuration conf = new HdfsConfiguration();
final int MAX_IDLE_TIME = 2000; // 2s
conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, TestFileCreation.blockSize);
// create cluster
System.out.println("Test 1*****************************");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = null;
try {
cluster.waitActive();
fs = cluster.getFileSystem();
// Normally, the in-progress edit log would be finalized by
// FSEditLog#endCurrentLogSegment. For testing purposes, we
// disable that here.
FSEditLog spyLog =
spy(cluster.getNameNode().getFSImage().getEditLog());
doNothing().when(spyLog).endCurrentLogSegment(Mockito.anyBoolean());
DFSTestUtil.setEditLogForTesting(cluster.getNamesystem(), spyLog);
final int nnport = cluster.getNameNodePort();
// create file1.
Path dir1 = new Path("/user/a+b/dir1");
Path file1 = new Path(dir1, "file1");
FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
System.out.println("testFileCreationDeleteParent: "
+ "Created file " + file1);
TestFileCreation.writeFile(stm1);
stm1.hflush();
// create file2.
Path dir2 = new Path("/user/dir2");
Path file2 = new Path(dir2, "file2");
FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
System.out.println("testFileCreationDeleteParent: "
+ "Created file " + file2);
TestFileCreation.writeFile(stm2);
stm2.hflush();
// move dir1 while file1 is open
Path dir3 = new Path("/user/dir3");
fs.mkdirs(dir3);
fs.rename(dir1, dir3);
// create file3
Path file3 = new Path(dir3, "file3");
FSDataOutputStream stm3 = fs.create(file3);
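      // rename file3 while its output stream is still open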
fs.rename(file3, new Path(dir3, "bozo"));
// Get a new block for the file.
TestFileCreation.writeFile(stm3, TestFileCreation.blockSize + 1);
stm3.hflush();
// Stop the NameNode before closing the files.
// This will ensure that the write leases are still active and present
      // in the edit log. Similarly, there should be a pending ADD_BLOCK_OP
// for file3, since we just added a block to that file.
cluster.getNameNode().stop();
// Restart cluster with the same namenode port as before.
cluster.shutdown();
try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
.format(false)
.build();
cluster.waitActive();
// restart cluster yet again. This triggers the code to read in
// persistent leases from the edit log.
cluster.shutdown();
try {Thread.sleep(5000);} catch (InterruptedException e) {}
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
.format(false)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
Path newfile = new Path("/user/dir3/dir1", "file1");
assertTrue(!fs.exists(file1));
assertTrue(fs.exists(file2));
assertTrue(fs.exists(newfile));
checkFullFile(fs, newfile);
} finally {
fs.close();
cluster.shutdown();
}
}
/**
* open /user/dir1/file1 /user/dir2/file2
* move /user/dir1 /user/dir3
*/
@Test
public void testWhileOpenRenameParentToNonexistentDir() throws IOException {
Configuration conf = new HdfsConfiguration();
final int MAX_IDLE_TIME = 2000; // 2s
conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
System.out.println("Test 2************************************");
// create cluster
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = null;
try {
cluster.waitActive();
fs = cluster.getFileSystem();
final int nnport = cluster.getNameNodePort();
// create file1.
Path dir1 = new Path("/user/dir1");
Path file1 = new Path(dir1, "file1");
FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
System.out.println("testFileCreationDeleteParent: "
+ "Created file " + file1);
TestFileCreation.writeFile(stm1);
stm1.hflush();
// create file2.
Path dir2 = new Path("/user/dir2");
Path file2 = new Path(dir2, "file2");
FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
System.out.println("testFileCreationDeleteParent: "
+ "Created file " + file2);
TestFileCreation.writeFile(stm2);
stm2.hflush();
// move dir1 while file1 is open
Path dir3 = new Path("/user/dir3");
fs.rename(dir1, dir3);
// restart cluster with the same namenode port as before.
// This ensures that leases are persisted in fsimage.
cluster.shutdown();
try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
.format(false)
.build();
cluster.waitActive();
// restart cluster yet again. This triggers the code to read in
// persistent leases from fsimage.
cluster.shutdown();
try {Thread.sleep(5000);} catch (InterruptedException e) {}
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
.format(false)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
Path newfile = new Path("/user/dir3", "file1");
assertTrue(!fs.exists(file1));
assertTrue(fs.exists(file2));
assertTrue(fs.exists(newfile));
checkFullFile(fs, newfile);
} finally {
fs.close();
cluster.shutdown();
}
}
/**
* open /user/dir1/file1
* mkdir /user/dir2
* move /user/dir1/file1 /user/dir2/
*/
@Test
public void testWhileOpenRenameToExistentDirectory() throws IOException {
Configuration conf = new HdfsConfiguration();
final int MAX_IDLE_TIME = 2000; // 2s
conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
System.out.println("Test 3************************************");
// create cluster
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = null;
try {
cluster.waitActive();
fs = cluster.getFileSystem();
final int nnport = cluster.getNameNodePort();
// create file1.
Path dir1 = new Path("/user/dir1");
Path file1 = new Path(dir1, "file1");
FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
System.out.println("testFileCreationDeleteParent: " +
"Created file " + file1);
TestFileCreation.writeFile(stm1);
stm1.hflush();
Path dir2 = new Path("/user/dir2");
fs.mkdirs(dir2);
fs.rename(file1, dir2);
// restart cluster with the same namenode port as before.
// This ensures that leases are persisted in fsimage.
cluster.shutdown();
try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
.format(false)
.build();
cluster.waitActive();
// restart cluster yet again. This triggers the code to read in
// persistent leases from fsimage.
cluster.shutdown();
try {Thread.sleep(5000);} catch (InterruptedException e) {}
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
.format(false)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
Path newfile = new Path("/user/dir2", "file1");
assertTrue(!fs.exists(file1));
assertTrue(fs.exists(newfile));
checkFullFile(fs, newfile);
} finally {
fs.close();
cluster.shutdown();
}
}
/**
* open /user/dir1/file1
* move /user/dir1/file1 /user/dir2/
*/
@Test
public void testWhileOpenRenameToNonExistentDirectory() throws IOException {
Configuration conf = new HdfsConfiguration();
final int MAX_IDLE_TIME = 2000; // 2s
conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
System.out.println("Test 4************************************");
// create cluster
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = null;
try {
cluster.waitActive();
fs = cluster.getFileSystem();
final int nnport = cluster.getNameNodePort();
// create file1.
Path dir1 = new Path("/user/dir1");
Path file1 = new Path(dir1, "file1");
FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
System.out.println("testFileCreationDeleteParent: "
+ "Created file " + file1);
TestFileCreation.writeFile(stm1);
stm1.hflush();
Path dir2 = new Path("/user/dir2");
fs.rename(file1, dir2);
// restart cluster with the same namenode port as before.
// This ensures that leases are persisted in fsimage.
cluster.shutdown();
try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
.format(false)
.build();
cluster.waitActive();
// restart cluster yet again. This triggers the code to read in
// persistent leases from fsimage.
cluster.shutdown();
try {Thread.sleep(5000);} catch (InterruptedException e) {}
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
.format(false)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
Path newfile = new Path("/user", "dir2");
assertTrue(!fs.exists(file1));
assertTrue(fs.exists(newfile));
checkFullFile(fs, newfile);
} finally {
fs.close();
cluster.shutdown();
}
}
}
| 13,226 | 37.008621 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.net.InetSocketAddress;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Random;
import java.util.List;
import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.util.Time;
import org.junit.Test;
/**
* This class tests the replication of a DFS file.
*/
public class TestReplication {
private static final long seed = 0xDEADBEEFL;
private static final int blockSize = 8192;
private static final int fileSize = 16384;
private static final String racks[] = new String[] {
"/d1/r1", "/d1/r1", "/d1/r2", "/d1/r2", "/d1/r2", "/d2/r3", "/d2/r3"
};
private static final int numDatanodes = racks.length;
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestReplication");
private void writeFile(FileSystem fileSys, Path name, int repl)
throws IOException {
    // create and write a file that contains two full blocks of data
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) repl, blockSize);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
stm.close();
}
  /* check that the replicas span racks: at least two on the same rack and at least two on different racks */
private void checkFile(FileSystem fileSys, Path name, int repl)
throws IOException {
Configuration conf = fileSys.getConf();
ClientProtocol namenode = NameNodeProxies.createProxy(conf, fileSys.getUri(),
ClientProtocol.class).getProxy();
waitForBlockReplication(name.toString(), namenode,
Math.min(numDatanodes, repl), -1);
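    // a maxWaitSec of -1 means wait without a timeout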
LocatedBlocks locations = namenode.getBlockLocations(name.toString(),0,
Long.MAX_VALUE);
FileStatus stat = fileSys.getFileStatus(name);
BlockLocation[] blockLocations = fileSys.getFileBlockLocations(stat,0L,
Long.MAX_VALUE);
// verify that rack locations match
assertTrue(blockLocations.length == locations.locatedBlockCount());
for (int i = 0; i < blockLocations.length; i++) {
LocatedBlock blk = locations.get(i);
DatanodeInfo[] datanodes = blk.getLocations();
String[] topologyPaths = blockLocations[i].getTopologyPaths();
assertTrue(topologyPaths.length == datanodes.length);
for (int j = 0; j < topologyPaths.length; j++) {
boolean found = false;
for (int k = 0; k < racks.length; k++) {
if (topologyPaths[j].startsWith(racks[k])) {
found = true;
break;
}
}
assertTrue(found);
}
}
boolean isOnSameRack = true, isNotOnSameRack = true;
for (LocatedBlock blk : locations.getLocatedBlocks()) {
DatanodeInfo[] datanodes = blk.getLocations();
if (datanodes.length <= 1) break;
if (datanodes.length == 2) {
isNotOnSameRack = !(datanodes[0].getNetworkLocation().equals(
datanodes[1].getNetworkLocation()));
break;
}
isOnSameRack = false;
isNotOnSameRack = false;
for (int i = 0; i < datanodes.length-1; i++) {
LOG.info("datanode "+ i + ": "+ datanodes[i]);
boolean onRack = false;
for( int j=i+1; j<datanodes.length; j++) {
if( datanodes[i].getNetworkLocation().equals(
datanodes[j].getNetworkLocation()) ) {
onRack = true;
}
}
if (onRack) {
isOnSameRack = true;
}
if (!onRack) {
isNotOnSameRack = true;
}
if (isOnSameRack && isNotOnSameRack) break;
}
if (!isOnSameRack || !isNotOnSameRack) break;
}
assertTrue(isOnSameRack);
assertTrue(isNotOnSameRack);
}
private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
assertTrue(fileSys.exists(name));
fileSys.delete(name, true);
assertTrue(!fileSys.exists(name));
}
private void testBadBlockReportOnTransfer(
boolean corruptBlockByDeletingBlockFile) throws Exception {
Configuration conf = new HdfsConfiguration();
FileSystem fs = null;
DFSClient dfsClient = null;
LocatedBlocks blocks = null;
int replicaCount = 0;
short replFactor = 1;
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.waitActive();
fs = cluster.getFileSystem();
dfsClient = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()), conf);
// Create file with replication factor of 1
Path file1 = new Path("/tmp/testBadBlockReportOnTransfer/file1");
DFSTestUtil.createFile(fs, file1, 1024, replFactor, 0);
DFSTestUtil.waitReplication(fs, file1, replFactor);
// Corrupt the block belonging to the created file
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
int blockFilesCorrupted =
corruptBlockByDeletingBlockFile?
cluster.corruptBlockOnDataNodesByDeletingBlockFile(block) :
cluster.corruptBlockOnDataNodes(block);
assertEquals("Corrupted too few blocks", replFactor, blockFilesCorrupted);
// Increase replication factor, this should invoke transfer request
// Receiving datanode fails on checksum and reports it to namenode
replFactor = 2;
fs.setReplication(file1, replFactor);
// Now get block details and check if the block is corrupt
blocks = dfsClient.getNamenode().
getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    while (!blocks.get(0).isCorrupt()) {
try {
LOG.info("Waiting until block is marked as corrupt...");
Thread.sleep(1000);
} catch (InterruptedException ie) {
}
blocks = dfsClient.getNamenode().
getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
}
replicaCount = blocks.get(0).getLocations().length;
assertTrue(replicaCount == 1);
cluster.shutdown();
}
/*
* Test if Datanode reports bad blocks during replication request
*/
@Test
public void testBadBlockReportOnTransfer() throws Exception {
testBadBlockReportOnTransfer(false);
}
/*
* Test if Datanode reports bad blocks during replication request
* with missing block file
*/
@Test
public void testBadBlockReportOnTransferMissingBlockFile() throws Exception {
testBadBlockReportOnTransfer(true);
}
/**
* Tests replication in DFS.
*/
public void runReplication(boolean simulated) throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
if (simulated) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDatanodes)
.racks(racks).build();
cluster.waitActive();
InetSocketAddress addr = new InetSocketAddress("localhost",
cluster.getNameNodePort());
DFSClient client = new DFSClient(addr, conf);
DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
assertEquals("Number of Datanodes ", numDatanodes, info.length);
FileSystem fileSys = cluster.getFileSystem();
try {
Path file1 = new Path("/smallblocktest.dat");
writeFile(fileSys, file1, 3);
checkFile(fileSys, file1, 3);
cleanupFile(fileSys, file1);
writeFile(fileSys, file1, 10);
checkFile(fileSys, file1, 10);
cleanupFile(fileSys, file1);
writeFile(fileSys, file1, 4);
checkFile(fileSys, file1, 4);
cleanupFile(fileSys, file1);
writeFile(fileSys, file1, 1);
checkFile(fileSys, file1, 1);
cleanupFile(fileSys, file1);
writeFile(fileSys, file1, 2);
checkFile(fileSys, file1, 2);
cleanupFile(fileSys, file1);
} finally {
fileSys.close();
cluster.shutdown();
}
}
@Test
  public void testReplicationSimulatedStorage() throws IOException {
runReplication(true);
}
@Test
public void testReplication() throws IOException {
runReplication(false);
}
// Waits for all of the blocks to have expected replication
private void waitForBlockReplication(String filename,
ClientProtocol namenode,
int expected, long maxWaitSec)
throws IOException {
long start = Time.monotonicNow();
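    // a non-positive maxWaitSec means wait indefinitely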
    // wait for all the blocks to be replicated
LOG.info("Checking for block replication for " + filename);
while (true) {
boolean replOk = true;
LocatedBlocks blocks = namenode.getBlockLocations(filename, 0,
Long.MAX_VALUE);
for (Iterator<LocatedBlock> iter = blocks.getLocatedBlocks().iterator();
iter.hasNext();) {
LocatedBlock block = iter.next();
int actual = block.getLocations().length;
if ( actual < expected ) {
LOG.info("Not enough replicas for " + block.getBlock()
+ " yet. Expecting " + expected + ", got " + actual + ".");
replOk = false;
break;
}
}
if (replOk) {
return;
}
if (maxWaitSec > 0 &&
(Time.monotonicNow() - start) > (maxWaitSec * 1000)) {
throw new IOException("Timedout while waiting for all blocks to " +
" be replicated for " + filename);
}
try {
Thread.sleep(500);
} catch (InterruptedException ignored) {}
}
}
  /* This test makes sure that the NameNode retries replication using all of
   * the available replicas of an under-replicated block.
   *
   * It creates a file with one block and a replication factor of 4. It
   * corrupts two of the replicas and removes another one. The expected
   * behavior is that the missing replica is re-copied from the one
   * remaining valid source.
   */
@Test
public void testPendingReplicationRetry() throws IOException {
MiniDFSCluster cluster = null;
int numDataNodes = 4;
String testFile = "/replication-test-file";
Path testPath = new Path(testFile);
byte buffer[] = new byte[1024];
for (int i=0; i<buffer.length; i++) {
buffer[i] = '1';
}
try {
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(numDataNodes));
//first time format
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()),
conf);
OutputStream out = cluster.getFileSystem().create(testPath);
out.write(buffer);
out.close();
waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
// get first block of the file.
ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(testFile,
0, Long.MAX_VALUE).get(0).getBlock();
cluster.shutdown();
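      // with the cluster down, tamper with the on-disk replica files directly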
for (int i=0; i<25; i++) {
buffer[i] = '0';
}
int fileCount = 0;
// Choose 3 copies of block file - delete 1 and corrupt the remaining 2
for (int dnIndex=0; dnIndex<3; dnIndex++) {
File blockFile = cluster.getBlockFile(dnIndex, block);
LOG.info("Checking for file " + blockFile);
if (blockFile != null && blockFile.exists()) {
if (fileCount == 0) {
LOG.info("Deleting file " + blockFile);
assertTrue(blockFile.delete());
} else {
// corrupt it.
LOG.info("Corrupting file " + blockFile);
long len = blockFile.length();
assertTrue(len > 50);
RandomAccessFile blockOut = new RandomAccessFile(blockFile, "rw");
try {
blockOut.seek(len/3);
blockOut.write(buffer, 0, 25);
} finally {
blockOut.close();
}
}
fileCount++;
}
}
assertEquals(3, fileCount);
/* Start the MiniDFSCluster with more datanodes since once a writeBlock
* to a datanode node fails, same block can not be written to it
* immediately. In our case some replication attempts will fail.
*/
LOG.info("Restarting minicluster after deleting a replica and corrupting 2 crcs");
conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(numDataNodes));
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
conf.set("dfs.datanode.block.write.timeout.sec", Integer.toString(5));
conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.75f"); // only 3 copies exist
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDataNodes * 2)
.format(false)
.build();
cluster.waitActive();
dfsClient = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()),
conf);
waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test if replication can detect mismatched length on-disk blocks
* @throws Exception
*/
@Test
public void testReplicateLenMismatchedBlock() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(2).build();
try {
cluster.waitActive();
// test truncated block
changeBlockLen(cluster, -1);
// test extended block
changeBlockLen(cluster, 1);
} finally {
cluster.shutdown();
}
}
private void changeBlockLen(MiniDFSCluster cluster, int lenDelta)
throws IOException, InterruptedException, TimeoutException {
final Path fileName = new Path("/file1");
final short REPLICATION_FACTOR = (short)1;
final FileSystem fs = cluster.getFileSystem();
final int fileLen = fs.getConf().getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
DFSTestUtil.createFile(fs, fileName, fileLen, REPLICATION_FACTOR, 0);
DFSTestUtil.waitReplication(fs, fileName, REPLICATION_FACTOR);
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
// Change the length of a replica
for (int i=0; i<cluster.getDataNodes().size(); i++) {
if (DFSTestUtil.changeReplicaLength(cluster, block, i, lenDelta)) {
break;
}
}
// increase the file's replication factor
fs.setReplication(fileName, (short)(REPLICATION_FACTOR+1));
// block replication triggers corrupt block detection
DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()), fs.getConf());
LocatedBlocks blocks = dfsClient.getNamenode().getBlockLocations(
fileName.toString(), 0, fileLen);
if (lenDelta < 0) { // replica truncated
while (!blocks.get(0).isCorrupt() ||
REPLICATION_FACTOR != blocks.get(0).getLocations().length) {
Thread.sleep(100);
blocks = dfsClient.getNamenode().getBlockLocations(
fileName.toString(), 0, fileLen);
}
} else { // no corruption detected; block replicated
while (REPLICATION_FACTOR+1 != blocks.get(0).getLocations().length) {
Thread.sleep(100);
blocks = dfsClient.getNamenode().getBlockLocations(
fileName.toString(), 0, fileLen);
}
}
fs.delete(fileName, true);
}
/**
   * Test that blocks get replicated when corrupted replicas exist, as long as
   * the number of good replicas is at least the minimum replication.
   *
   * Simulate rbw blocks by creating dummy copies, then restart the DNs so
   * the corrupted blocks are detected as soon as possible.
*/
@Test(timeout=30000)
public void testReplicationWhenBlockCorruption() throws Exception {
MiniDFSCluster cluster = null;
try {
Configuration conf = new HdfsConfiguration();
conf.setLong(
DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = cluster.getFileSystem();
FSDataOutputStream create = fs.create(new Path("/test"));
fs.setReplication(new Path("/test"), (short) 1);
create.write(new byte[1024]);
create.close();
List<File> nonParticipatedNodeDirs = new ArrayList<File>();
File participatedNodeDirs = null;
for (int i = 0; i < cluster.getDataNodes().size(); i++) {
File storageDir = cluster.getInstanceStorageDir(i, 0);
String bpid = cluster.getNamesystem().getBlockPoolId();
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
if (data_dir.listFiles().length == 0) {
nonParticipatedNodeDirs.add(data_dir);
} else {
assertNull("participatedNodeDirs has already been set.",
participatedNodeDirs);
participatedNodeDirs = data_dir;
}
}
assertEquals(2, nonParticipatedNodeDirs.size());
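      // only one DataNode holds the single replica; the other two data dirs are empty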
String blockFile = null;
final List<File> listFiles = new ArrayList<>();
Files.walkFileTree(participatedNodeDirs.toPath(),
new SimpleFileVisitor<java.nio.file.Path>() {
@Override
public FileVisitResult visitFile(
java.nio.file.Path file, BasicFileAttributes attrs)
throws IOException {
listFiles.add(file.toFile());
return FileVisitResult.CONTINUE;
}
}
);
assertFalse(listFiles.isEmpty());
int numReplicaCreated = 0;
for (File file : listFiles) {
if (file.getName().startsWith(Block.BLOCK_FILE_PREFIX)
&& !file.getName().endsWith("meta")) {
blockFile = file.getName();
for (File file1 : nonParticipatedNodeDirs) {
file1.mkdirs();
new File(file1, blockFile).createNewFile();
new File(file1, blockFile + "_1000.meta").createNewFile();
numReplicaCreated++;
}
break;
}
}
assertEquals(2, numReplicaCreated);
fs.setReplication(new Path("/test"), (short) 3);
      // restart the DataNodes so they detect the dummy copied blocks
      cluster.restartDataNodes();
cluster.waitActive();
cluster.triggerBlockReports();
DFSTestUtil.waitReplication(fs, new Path("/test"), (short) 3);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 21,418 | 36.380454 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestBlockReaderLocalLegacy {
@BeforeClass
public static void setupCluster() throws IOException {
DFSInputStream.tcpReadsDisabledForTesting = true;
DomainSocket.disableBindPathValidation();
}
private static HdfsConfiguration getConfiguration(
TemporarySocketDirectory socketDir) throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
if (socketDir == null) {
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, "");
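      // an empty domain socket path leaves only the legacy short-circuit reader in play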
} else {
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
new File(socketDir.getDir(), "TestBlockReaderLocalLegacy.%d.sock").
getAbsolutePath());
}
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
false);
conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
UserGroupInformation.getCurrentUser().getShortUserName());
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
// Set short retry timeouts so this test runs faster
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
return conf;
}
/**
* Test that, in the case of an error, the position and limit of a ByteBuffer
* are left unchanged. This is not mandated by ByteBufferReadable, but clients
* of this class might immediately issue a retry on failure, so it's polite.
*/
@Test
public void testStablePositionAfterCorruptRead() throws Exception {
final short REPL_FACTOR = 1;
final long FILE_LENGTH = 512L;
HdfsConfiguration conf = getConfiguration(null);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
Path path = new Path("/corrupted");
DFSTestUtil.createFile(fs, path, FILE_LENGTH, REPL_FACTOR, 12345L);
DFSTestUtil.waitReplication(fs, path, REPL_FACTOR);
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, path);
int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
assertEquals("All replicas not corrupted", REPL_FACTOR, blockFilesCorrupted);
FSDataInputStream dis = cluster.getFileSystem().open(path);
ByteBuffer buf = ByteBuffer.allocateDirect((int)FILE_LENGTH);
boolean sawException = false;
try {
dis.read(buf);
} catch (ChecksumException ex) {
sawException = true;
}
assertTrue(sawException);
assertEquals(0, buf.position());
assertEquals(buf.capacity(), buf.limit());
dis = cluster.getFileSystem().open(path);
buf.position(3);
buf.limit(25);
sawException = false;
try {
dis.read(buf);
} catch (ChecksumException ex) {
sawException = true;
}
assertTrue(sawException);
assertEquals(3, buf.position());
assertEquals(25, buf.limit());
cluster.shutdown();
}
@Test
public void testBothOldAndNewShortCircuitConfigured() throws Exception {
final short REPL_FACTOR = 1;
final int FILE_LENGTH = 512;
Assume.assumeTrue(null == DomainSocket.getLoadingFailureReason());
TemporarySocketDirectory socketDir = new TemporarySocketDirectory();
HdfsConfiguration conf = getConfiguration(socketDir);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
socketDir.close();
FileSystem fs = cluster.getFileSystem();
Path path = new Path("/foo");
byte orig[] = new byte[FILE_LENGTH];
for (int i = 0; i < orig.length; i++) {
orig[i] = (byte)(i%10);
}
FSDataOutputStream fos = fs.create(path, (short)1);
fos.write(orig);
fos.close();
DFSTestUtil.waitReplication(fs, path, REPL_FACTOR);
FSDataInputStream fis = cluster.getFileSystem().open(path);
byte buf[] = new byte[FILE_LENGTH];
IOUtils.readFully(fis, buf, 0, FILE_LENGTH);
fis.close();
Assert.assertArrayEquals(orig, buf);
cluster.shutdown();
}
@Test(timeout=20000)
public void testBlockReaderLocalLegacyWithAppend() throws Exception {
final short REPL_FACTOR = 1;
final HdfsConfiguration conf = getConfiguration(null);
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final Path path = new Path("/testBlockReaderLocalLegacy");
DFSTestUtil.createFile(dfs, path, 10, REPL_FACTOR, 0);
DFSTestUtil.waitReplication(dfs, path, REPL_FACTOR);
final ClientDatanodeProtocol proxy;
final Token<BlockTokenIdentifier> token;
final ExtendedBlock originalBlock;
final long originalGS;
{
final LocatedBlock lb = cluster.getNameNode().getRpcServer()
.getBlockLocations(path.toString(), 0, 1).get(0);
proxy = DFSUtil.createClientDatanodeProtocolProxy(
lb.getLocations()[0], conf, 60000, false);
token = lb.getBlockToken();
// get block and generation stamp
final ExtendedBlock blk = new ExtendedBlock(lb.getBlock());
originalBlock = new ExtendedBlock(blk);
originalGS = originalBlock.getGenerationStamp();
// test getBlockLocalPathInfo
final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(blk, token);
Assert.assertEquals(originalGS, info.getBlock().getGenerationStamp());
}
{ // append one byte
FSDataOutputStream out = dfs.append(path);
out.write(1);
out.close();
}
{
// get new generation stamp
final LocatedBlock lb = cluster.getNameNode().getRpcServer()
.getBlockLocations(path.toString(), 0, 1).get(0);
final long newGS = lb.getBlock().getGenerationStamp();
Assert.assertTrue(newGS > originalGS);
// getBlockLocalPathInfo using the original block.
Assert.assertEquals(originalGS, originalBlock.getGenerationStamp());
final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(
originalBlock, token);
Assert.assertEquals(newGS, info.getBlock().getGenerationStamp());
}
cluster.shutdown();
}
}
| 8,289 | 36.511312 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.fs.CreateFlag.CREATE;
import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST;
import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.io.PrintStream;
import java.io.RandomAccessFile;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.URI;
import java.net.URL;
import java.net.URLConnection;
import java.nio.ByteBuffer;
import java.security.NoSuchAlgorithmException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.VersionInfo;
import org.apache.log4j.Level;
import org.junit.Assume;
import org.mockito.internal.util.reflection.Whitebox;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
/** Utilities for HDFS tests */
public class DFSTestUtil {
private static final Log LOG = LogFactory.getLog(DFSTestUtil.class);
private static final Random gen = new Random();
private static final String[] dirNames = {
"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"
};
private final int maxLevels;
private final int maxSize;
private final int minSize;
private final int nFiles;
private MyFile[] files;
/** Creates a new instance of DFSTestUtil
*
* @param nFiles Number of files to be created
* @param maxLevels Maximum number of directory levels
* @param maxSize Maximum size for file
* @param minSize Minimum size for file
*/
private DFSTestUtil(int nFiles, int maxLevels, int maxSize, int minSize) {
this.nFiles = nFiles;
this.maxLevels = maxLevels;
this.maxSize = maxSize;
this.minSize = minSize;
}
/** Creates a new instance of DFSTestUtil
*
* @param testName Name of the test from where this utility is used
* @param nFiles Number of files to be created
* @param maxLevels Maximum number of directory levels
* @param maxSize Maximum size for file
* @param minSize Minimum size for file
*/
public DFSTestUtil(String testName, int nFiles, int maxLevels, int maxSize,
int minSize) {
this.nFiles = nFiles;
this.maxLevels = maxLevels;
this.maxSize = maxSize;
this.minSize = minSize;
}
/**
* When formatting a namenode, we must provide a cluster id.
* @param conf
* @throws IOException
*/
public static void formatNameNode(Configuration conf) throws IOException {
String clusterId = StartupOption.FORMAT.getClusterId();
if(clusterId == null || clusterId.isEmpty())
StartupOption.FORMAT.setClusterId("testClusterID");
// Use a copy of conf as it can be altered by namenode during format.
NameNode.format(new Configuration(conf));
}
/**
* Create a new HA-enabled configuration.
*/
public static Configuration newHAConfiguration(final String logicalName) {
Configuration conf = new Configuration();
addHAConfiguration(conf, logicalName);
return conf;
}
/**
* Add a new HA configuration.
*/
public static void addHAConfiguration(Configuration conf,
final String logicalName) {
String nsIds = conf.get(DFSConfigKeys.DFS_NAMESERVICES);
if (nsIds == null) {
conf.set(DFSConfigKeys.DFS_NAMESERVICES, logicalName);
} else { // append the nsid
conf.set(DFSConfigKeys.DFS_NAMESERVICES, nsIds + "," + logicalName);
}
conf.set(DFSUtil.addKeySuffixes(HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
logicalName), "nn1,nn2");
conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX +
"." + logicalName,
ConfiguredFailoverProxyProvider.class.getName());
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
}
public static void setFakeHttpAddresses(Configuration conf,
final String logicalName) {
conf.set(DFSUtil.addKeySuffixes(
DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
logicalName, "nn1"), "127.0.0.1:12345");
conf.set(DFSUtil.addKeySuffixes(
DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
logicalName, "nn2"), "127.0.0.1:12346");
}
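  /**
   * Illustrative sketch only (not part of the original utility): how the HA
   * helpers above are typically combined in a test. The logical nameservice
   * name "ns1" is an arbitrary assumption.
   */
  private static Configuration exampleHAConfSketch() {
    Configuration conf = newHAConfiguration("ns1"); // declares ns1 with nn1,nn2
    setFakeHttpAddresses(conf, "ns1");              // fake HTTP addresses for both NNs
    return conf;
  }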
public static void setEditLogForTesting(FSNamesystem fsn, FSEditLog newLog) {
Whitebox.setInternalState(fsn.getFSImage(), "editLog", newLog);
Whitebox.setInternalState(fsn.getFSDirectory(), "editLog", newLog);
}
/** class MyFile contains enough information to recreate the contents of
* a single file.
*/
private class MyFile {
private String name = "";
private final int size;
private final long seed;
MyFile() {
int nLevels = gen.nextInt(maxLevels);
if (nLevels != 0) {
int[] levels = new int[nLevels];
for (int idx = 0; idx < nLevels; idx++) {
levels[idx] = gen.nextInt(10);
}
StringBuffer sb = new StringBuffer();
for (int idx = 0; idx < nLevels; idx++) {
sb.append(dirNames[levels[idx]]);
sb.append("/");
}
name = sb.toString();
}
long fidx = -1;
while (fidx < 0) { fidx = gen.nextLong(); }
name = name + Long.toString(fidx);
size = minSize + gen.nextInt(maxSize - minSize);
seed = gen.nextLong();
}
String getName() { return name; }
int getSize() { return size; }
long getSeed() { return seed; }
}
public void createFiles(FileSystem fs, String topdir) throws IOException {
createFiles(fs, topdir, (short)3);
}
/** create nFiles with random names and directory hierarchies
* with random (but reproducible) data in them.
*/
public void createFiles(FileSystem fs, String topdir,
short replicationFactor) throws IOException {
files = new MyFile[nFiles];
for (int idx = 0; idx < nFiles; idx++) {
files[idx] = new MyFile();
}
Path root = new Path(topdir);
for (int idx = 0; idx < nFiles; idx++) {
createFile(fs, new Path(root, files[idx].getName()), files[idx].getSize(),
replicationFactor, files[idx].getSeed());
}
}
public static String readFile(FileSystem fs, Path fileName)
throws IOException {
byte buf[] = readFileBuffer(fs, fileName);
return new String(buf, 0, buf.length);
}
public static byte[] readFileBuffer(FileSystem fs, Path fileName)
throws IOException {
ByteArrayOutputStream os = new ByteArrayOutputStream();
try {
FSDataInputStream in = fs.open(fileName);
try {
IOUtils.copyBytes(in, os, 1024, true);
return os.toByteArray();
} finally {
in.close();
}
} finally {
os.close();
}
}
public static void createFile(FileSystem fs, Path fileName, long fileLen,
short replFactor, long seed) throws IOException {
if (!fs.mkdirs(fileName.getParent())) {
throw new IOException("Mkdirs failed to create " +
fileName.getParent().toString());
}
FSDataOutputStream out = null;
try {
out = fs.create(fileName, replFactor);
byte[] toWrite = new byte[1024];
Random rb = new Random(seed);
long bytesToWrite = fileLen;
while (bytesToWrite>0) {
rb.nextBytes(toWrite);
int bytesToWriteNext = (1024<bytesToWrite)?1024:(int)bytesToWrite;
out.write(toWrite, 0, bytesToWriteNext);
bytesToWrite -= bytesToWriteNext;
}
out.close();
out = null;
} finally {
IOUtils.closeStream(out);
}
}
public static void createFile(FileSystem fs, Path fileName, int bufferLen,
long fileLen, long blockSize, short replFactor, long seed)
throws IOException {
createFile(fs, fileName, false, bufferLen, fileLen, blockSize, replFactor,
seed, false);
}
public static void createFile(FileSystem fs, Path fileName,
boolean isLazyPersist, int bufferLen, long fileLen, long blockSize,
short replFactor, long seed, boolean flush) throws IOException {
createFile(fs, fileName, isLazyPersist, bufferLen, fileLen, blockSize,
replFactor, seed, flush, null);
}
public static void createFile(FileSystem fs, Path fileName,
boolean isLazyPersist, int bufferLen, long fileLen, long blockSize,
short replFactor, long seed, boolean flush,
InetSocketAddress[] favoredNodes) throws IOException {
assert bufferLen > 0;
if (!fs.mkdirs(fileName.getParent())) {
throw new IOException("Mkdirs failed to create " +
fileName.getParent().toString());
}
FSDataOutputStream out = null;
EnumSet<CreateFlag> createFlags = EnumSet.of(CREATE);
createFlags.add(OVERWRITE);
if (isLazyPersist) {
createFlags.add(LAZY_PERSIST);
}
try {
if (favoredNodes == null) {
out = fs.create(
fileName,
FsPermission.getFileDefault(),
createFlags,
fs.getConf().getInt(
CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
replFactor, blockSize, null);
} else {
out = ((DistributedFileSystem) fs).create(fileName,
FsPermission.getDefault(), true, bufferLen, replFactor, blockSize,
null, favoredNodes);
}
if (fileLen > 0) {
byte[] toWrite = new byte[bufferLen];
Random rb = new Random(seed);
long bytesToWrite = fileLen;
while (bytesToWrite>0) {
rb.nextBytes(toWrite);
int bytesToWriteNext = (bufferLen < bytesToWrite) ? bufferLen
: (int) bytesToWrite;
out.write(toWrite, 0, bytesToWriteNext);
bytesToWrite -= bytesToWriteNext;
}
if (flush) {
out.hsync();
}
}
} finally {
if (out != null) {
out.close();
}
}
}
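  /**
   * Illustrative sketch only (not part of the original utility): the most
   * common createFile usage, a 4 KB file at replication 3 filled from a fixed
   * seed. Path and seed are arbitrary assumptions.
   */
  private static void exampleCreateFileSketch(FileSystem fs) throws IOException {
    createFile(fs, new Path("/example/file"), 4096L, (short) 3, 0xBEEFL);
  }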
public static byte[] calculateFileContentsFromSeed(long seed, int length) {
Random rb = new Random(seed);
byte val[] = new byte[length];
rb.nextBytes(val);
return val;
}
/** check if the files have been copied correctly. */
public boolean checkFiles(FileSystem fs, String topdir) throws IOException {
Path root = new Path(topdir);
for (int idx = 0; idx < nFiles; idx++) {
Path fPath = new Path(root, files[idx].getName());
FSDataInputStream in = fs.open(fPath);
byte[] toRead = new byte[files[idx].getSize()];
byte[] toCompare = new byte[files[idx].getSize()];
Random rb = new Random(files[idx].getSeed());
rb.nextBytes(toCompare);
in.readFully(0, toRead);
in.close();
for (int i = 0; i < toRead.length; i++) {
if (toRead[i] != toCompare[i]) {
return false;
}
}
toRead = null;
toCompare = null;
}
return true;
}
void setReplication(FileSystem fs, String topdir, short value)
throws IOException {
Path root = new Path(topdir);
for (int idx = 0; idx < nFiles; idx++) {
Path fPath = new Path(root, files[idx].getName());
fs.setReplication(fPath, value);
}
}
/*
* Waits for the replication factor of all files to reach the
* specified target.
*/
public void waitReplication(FileSystem fs, String topdir, short value)
throws IOException, InterruptedException, TimeoutException {
Path root = new Path(topdir);
/** wait for the replication factor to settle down */
for (int idx = 0; idx < nFiles; idx++) {
waitReplication(fs, new Path(root, files[idx].getName()), value);
}
}
/*
* Check if the given block in the given file is corrupt.
*/
public static boolean allBlockReplicasCorrupt(MiniDFSCluster cluster,
Path file, int blockNo) throws IOException {
DFSClient client = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()), cluster.getConfiguration(0));
LocatedBlocks blocks;
try {
blocks = client.getNamenode().getBlockLocations(
file.toString(), 0, Long.MAX_VALUE);
} finally {
client.close();
}
return blocks.get(blockNo).isCorrupt();
}
/*
* Wait up to 20s for the given block to be replicated across
* the requested number of racks, with the requested number of
* replicas, and the requested number of replicas still needed.
*/
public static void waitForReplication(MiniDFSCluster cluster, ExtendedBlock b,
int racks, int replicas, int neededReplicas)
throws TimeoutException, InterruptedException {
int curRacks = 0;
int curReplicas = 0;
int curNeededReplicas = 0;
int count = 0;
final int ATTEMPTS = 20;
do {
Thread.sleep(1000);
int[] r = BlockManagerTestUtil.getReplicaInfo(cluster.getNamesystem(),
b.getLocalBlock());
curRacks = r[0];
curReplicas = r[1];
curNeededReplicas = r[2];
count++;
} while ((curRacks != racks ||
curReplicas != replicas ||
curNeededReplicas != neededReplicas) && count < ATTEMPTS);
if (count == ATTEMPTS) {
throw new TimeoutException("Timed out waiting for replication."
+ " Needed replicas = "+neededReplicas
+ " Cur needed replicas = "+curNeededReplicas
+ " Replicas = "+replicas+" Cur replicas = "+curReplicas
+ " Racks = "+racks+" Cur racks = "+curRacks);
}
}
public static void waitForReplication(final DistributedFileSystem dfs,
final Path file, final short replication, int waitForMillis)
throws TimeoutException, InterruptedException {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
FileStatus stat = dfs.getFileStatus(file);
BlockLocation[] locs = dfs.getFileBlockLocations(stat, 0, stat
.getLen());
for (BlockLocation loc : locs) {
if (replication != loc.getHosts().length) {
return false;
}
}
return true;
} catch (IOException e) {
LOG.info("getFileStatus on path " + file + " failed!", e);
return false;
}
}
}, 100, waitForMillis);
}
/**
* Keep accessing the given file until the namenode reports that the
* given block in the file contains the given number of corrupt replicas.
*/
public static void waitCorruptReplicas(FileSystem fs, FSNamesystem ns,
Path file, ExtendedBlock b, int corruptRepls)
throws TimeoutException, InterruptedException {
int count = 0;
final int ATTEMPTS = 50;
int repls = ns.getBlockManager().numCorruptReplicas(b.getLocalBlock());
while (repls != corruptRepls && count < ATTEMPTS) {
try {
IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(),
512, true);
} catch (IOException e) {
// Swallow exceptions
}
System.out.println("Waiting for "+corruptRepls+" corrupt replicas");
count++;
// check more often so corrupt block reports are not easily missed
for (int i = 0; i < 10; i++) {
repls = ns.getBlockManager().numCorruptReplicas(b.getLocalBlock());
Thread.sleep(100);
if (repls == corruptRepls) {
break;
}
}
}
if (count == ATTEMPTS) {
throw new TimeoutException("Timed out waiting for corrupt replicas."
+ " Waiting for "+corruptRepls+", but only found "+repls);
}
}
/*
* Wait up to 20s for the given DN (IP:port) to be decommissioned
*/
public static void waitForDecommission(FileSystem fs, String name)
throws IOException, InterruptedException, TimeoutException {
DatanodeInfo dn = null;
int count = 0;
final int ATTEMPTS = 20;
do {
Thread.sleep(1000);
DistributedFileSystem dfs = (DistributedFileSystem)fs;
for (DatanodeInfo info : dfs.getDataNodeStats()) {
if (name.equals(info.getXferAddr())) {
dn = info;
}
}
count++;
} while ((dn == null ||
dn.isDecommissionInProgress() ||
!dn.isDecommissioned()) && count < ATTEMPTS);
if (count == ATTEMPTS) {
throw new TimeoutException("Timed out waiting for datanode "
+ name + " to decommission.");
}
}
/*
* Returns the index of the first datanode which has a copy
* of the given block, or -1 if no such datanode exists.
*/
public static int firstDnWithBlock(MiniDFSCluster cluster, ExtendedBlock b)
throws IOException {
int numDatanodes = cluster.getDataNodes().size();
for (int i = 0; i < numDatanodes; i++) {
String blockContent = cluster.readBlockOnDataNode(i, b);
if (blockContent != null) {
return i;
}
}
return -1;
}
/*
* Return the total capacity of all live DNs.
*/
public static long getLiveDatanodeCapacity(DatanodeManager dm) {
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
dm.fetchDatanodes(live, null, false);
long capacity = 0;
for (final DatanodeDescriptor dn : live) {
capacity += dn.getCapacity();
}
return capacity;
}
/*
* Return the capacity of the given live DN.
*/
public static long getDatanodeCapacity(DatanodeManager dm, int index) {
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
dm.fetchDatanodes(live, null, false);
return live.get(index).getCapacity();
}
/*
* Wait for the given # live/dead DNs, total capacity, and # vol failures.
*/
public static void waitForDatanodeStatus(DatanodeManager dm, int expectedLive,
int expectedDead, long expectedVolFails, long expectedTotalCapacity,
long timeout) throws InterruptedException, TimeoutException {
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
final int ATTEMPTS = 10;
int count = 0;
long currTotalCapacity = 0;
int volFails = 0;
do {
Thread.sleep(timeout);
live.clear();
dead.clear();
dm.fetchDatanodes(live, dead, false);
currTotalCapacity = 0;
volFails = 0;
for (final DatanodeDescriptor dd : live) {
currTotalCapacity += dd.getCapacity();
volFails += dd.getVolumeFailures();
}
count++;
} while ((expectedLive != live.size() ||
expectedDead != dead.size() ||
expectedTotalCapacity != currTotalCapacity ||
expectedVolFails != volFails)
&& count < ATTEMPTS);
if (count == ATTEMPTS) {
throw new TimeoutException("Timed out waiting for capacity."
+ " Live = "+live.size()+" Expected = "+expectedLive
+ " Dead = "+dead.size()+" Expected = "+expectedDead
+ " Total capacity = "+currTotalCapacity
+ " Expected = "+expectedTotalCapacity
+ " Vol Fails = "+volFails+" Expected = "+expectedVolFails);
}
}
/*
* Wait for the given DN to consider itself dead.
*/
public static void waitForDatanodeDeath(DataNode dn)
throws InterruptedException, TimeoutException {
final int ATTEMPTS = 10;
int count = 0;
do {
Thread.sleep(1000);
count++;
} while (dn.isDatanodeUp() && count < ATTEMPTS);
if (count == ATTEMPTS) {
throw new TimeoutException("Timed out waiting for DN to die");
}
}
/** return list of filenames created as part of createFiles */
public String[] getFileNames(String topDir) {
if (nFiles == 0)
return new String[]{};
else {
String[] fileNames = new String[nFiles];
for (int idx=0; idx < nFiles; idx++) {
fileNames[idx] = topDir + "/" + files[idx].getName();
}
return fileNames;
}
}
/**
* Wait for the given file to reach the given replication factor.
* @throws TimeoutException if we fail to sufficiently replicate the file
*/
public static void waitReplication(FileSystem fs, Path fileName, short replFactor)
throws IOException, InterruptedException, TimeoutException {
boolean correctReplFactor;
final int ATTEMPTS = 40;
int count = 0;
do {
correctReplFactor = true;
BlockLocation locs[] = fs.getFileBlockLocations(
fs.getFileStatus(fileName), 0, Long.MAX_VALUE);
count++;
for (int j = 0; j < locs.length; j++) {
String[] hostnames = locs[j].getNames();
if (hostnames.length != replFactor) {
correctReplFactor = false;
System.out.println("Block " + j + " of file " + fileName
+ " has replication factor " + hostnames.length
+ " (desired " + replFactor + "); locations "
+ Joiner.on(' ').join(hostnames));
Thread.sleep(1000);
break;
}
}
if (correctReplFactor) {
System.out.println("All blocks of file " + fileName
+ " verified to have replication factor " + replFactor);
}
} while (!correctReplFactor && count < ATTEMPTS);
if (count == ATTEMPTS) {
throw new TimeoutException("Timed out waiting for " + fileName +
" to reach " + replFactor + " replicas");
}
}
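  /**
   * Illustrative sketch only (not part of the original utility): create a file
   * and block until every block reaches the requested replication factor. The
   * path is an arbitrary assumption.
   */
  private static void exampleWaitReplicationSketch(FileSystem fs)
      throws IOException, InterruptedException, TimeoutException {
    Path p = new Path("/example/replicated");
    createFile(fs, p, 1024L, (short) 2, 1L);
    waitReplication(fs, p, (short) 2);
  }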
/** delete directory and everything underneath it.*/
public void cleanup(FileSystem fs, String topdir) throws IOException {
Path root = new Path(topdir);
fs.delete(root, true);
files = null;
}
public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path);
try {
in.readByte();
return in.getCurrentBlock();
} finally {
in.close();
}
}
public static List<LocatedBlock> getAllBlocks(FSDataInputStream in)
throws IOException {
return ((HdfsDataInputStream) in).getAllBlocks();
}
public static List<LocatedBlock> getAllBlocks(FileSystem fs, Path path)
throws IOException {
HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path);
return in.getAllBlocks();
}
public static Token<BlockTokenIdentifier> getBlockToken(
FSDataOutputStream out) {
return ((DFSOutputStream) out.getWrappedStream()).getBlockToken();
}
public static String readFile(File f) throws IOException {
StringBuilder b = new StringBuilder();
BufferedReader in = new BufferedReader(new FileReader(f));
for(int c; (c = in.read()) != -1; b.append((char)c));
in.close();
return b.toString();
}
/* Write the given string to the given file */
public static void writeFile(FileSystem fs, Path p, String s)
throws IOException {
if (fs.exists(p)) {
fs.delete(p, true);
}
InputStream is = new ByteArrayInputStream(s.getBytes());
FSDataOutputStream os = fs.create(p);
IOUtils.copyBytes(is, os, s.length(), true);
}
/* Append the given string to the given file */
public static void appendFile(FileSystem fs, Path p, String s)
throws IOException {
assert fs.exists(p);
InputStream is = new ByteArrayInputStream(s.getBytes());
FSDataOutputStream os = fs.append(p);
IOUtils.copyBytes(is, os, s.length(), true);
}
/**
* Append specified length of bytes to a given file
* @param fs The file system
* @param p Path of the file to append
* @param length Length of bytes to append to the file
* @throws IOException
*/
public static void appendFile(FileSystem fs, Path p, int length)
throws IOException {
assert fs.exists(p);
assert length >= 0;
byte[] toAppend = new byte[length];
Random random = new Random();
random.nextBytes(toAppend);
FSDataOutputStream out = fs.append(p);
out.write(toAppend);
out.close();
}
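  /**
   * Illustrative sketch only (not part of the original utility): write a file
   * with known contents and then grow it with both append overloads. Path and
   * contents are arbitrary assumptions.
   */
  private static void exampleAppendSketch(FileSystem fs) throws IOException {
    Path p = new Path("/example/appended");
    writeFile(fs, p, "header:");   // create (or overwrite) with known contents
    appendFile(fs, p, "v1");       // append a fixed string
    appendFile(fs, p, 128);        // append 128 random bytes
  }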
/**
* @return url content as string (UTF-8 encoding assumed)
*/
public static String urlGet(URL url) throws IOException {
return new String(urlGetBytes(url), Charsets.UTF_8);
}
/**
* @return URL contents as a byte array
*/
public static byte[] urlGetBytes(URL url) throws IOException {
URLConnection conn = url.openConnection();
HttpURLConnection hc = (HttpURLConnection)conn;
assertEquals(HttpURLConnection.HTTP_OK, hc.getResponseCode());
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
return out.toByteArray();
}
/**
* mock class to get group mapping for fake users
*
*/
static class MockUnixGroupsMapping extends ShellBasedUnixGroupsMapping {
static Map<String, String []> fakeUser2GroupsMap;
private static final List<String> defaultGroups;
static {
defaultGroups = new ArrayList<String>(1);
defaultGroups.add("supergroup");
fakeUser2GroupsMap = new HashMap<String, String[]>();
}
@Override
public List<String> getGroups(String user) throws IOException {
boolean found = false;
// check to see if this is one of the fake users
List<String> l = new ArrayList<String>();
for(String u : fakeUser2GroupsMap.keySet()) {
if(user.equals(u)) {
found = true;
for(String gr : fakeUser2GroupsMap.get(u)) {
l.add(gr);
}
}
}
// default
if(!found) {
l = super.getGroups(user);
if(l.size() == 0) {
System.out.println("failed to get real group for " + user +
"; using default");
return defaultGroups;
}
}
return l;
}
}
/**
* update the configuration with fake class for mapping user to groups
* @param conf
* @param map - user to groups mapping
*/
static public void updateConfWithFakeGroupMapping
(Configuration conf, Map<String, String []> map) {
if(map!=null) {
MockUnixGroupsMapping.fakeUser2GroupsMap = map;
}
// fake mapping user to groups
conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
DFSTestUtil.MockUnixGroupsMapping.class,
ShellBasedUnixGroupsMapping.class);
}
/**
* Get a FileSystem instance as specified user in a doAs block.
*/
static public FileSystem getFileSystemAs(UserGroupInformation ugi,
final Configuration conf) throws IOException {
try {
return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return FileSystem.get(conf);
}
});
} catch (InterruptedException e) {
throw (InterruptedIOException)new InterruptedIOException().initCause(e);
}
}
public static byte[] generateSequentialBytes(int start, int length) {
byte[] result = new byte[length];
for (int i = 0; i < length; i++) {
result[i] = (byte) ((start + i) % 127);
}
return result;
}
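  /**
   * Illustrative sketch only (not part of the original utility): the sequence
   * wraps modulo 127, e.g. generateSequentialBytes(125, 4) is {125, 126, 0, 1}.
   */
  private static void exampleSequentialBytesSketch() {
    byte[] b = generateSequentialBytes(125, 4);
    assert b[0] == 125 && b[1] == 126 && b[2] == 0 && b[3] == 1;
  }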
public static Statistics getStatistics(FileSystem fs) {
return FileSystem.getStatistics(fs.getUri().getScheme(), fs.getClass());
}
/**
* Load file into byte[]
*/
public static byte[] loadFile(String filename) throws IOException {
File file = new File(filename);
DataInputStream in = new DataInputStream(new FileInputStream(file));
byte[] content = new byte[(int)file.length()];
try {
in.readFully(content);
} finally {
IOUtils.cleanup(LOG, in);
}
return content;
}
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b,
final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
assertEquals(2, datanodes.length);
final Socket s = DataStreamer.createSocketForPipeline(datanodes[0],
datanodes.length, dfsClient);
final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
NetUtils.getOutputStream(s, writeTimeout),
DFSUtil.getSmallBufferSize(dfsClient.getConfiguration())));
final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));
// send the request
new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
new StorageType[]{StorageType.DEFAULT});
out.flush();
return BlockOpResponseProto.parseDelimitedFrom(in);
}
public static void setFederatedConfiguration(MiniDFSCluster cluster,
Configuration conf) {
Set<String> nameservices = new HashSet<String>();
for (NameNodeInfo info : cluster.getNameNodeInfos()) {
assert info.nameserviceId != null;
nameservices.add(info.nameserviceId);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
info.nameserviceId), DFSUtil.createUri(HdfsConstants.HDFS_URI_SCHEME,
info.nameNode.getNameNodeAddress()).toString());
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
info.nameserviceId), DFSUtil.createUri(HdfsConstants.HDFS_URI_SCHEME,
info.nameNode.getNameNodeAddress()).toString());
}
conf.set(DFSConfigKeys.DFS_NAMESERVICES, Joiner.on(",")
.join(nameservices));
}
public static void setFederatedHAConfiguration(MiniDFSCluster cluster,
Configuration conf) {
Map<String, List<String>> nameservices = Maps.newHashMap();
for (NameNodeInfo info : cluster.getNameNodeInfos()) {
Preconditions.checkState(info.nameserviceId != null);
List<String> nns = nameservices.get(info.nameserviceId);
if (nns == null) {
nns = Lists.newArrayList();
nameservices.put(info.nameserviceId, nns);
}
nns.add(info.nnId);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
info.nameserviceId, info.nnId),
DFSUtil.createUri(HdfsConstants.HDFS_URI_SCHEME,
info.nameNode.getNameNodeAddress()).toString());
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
info.nameserviceId, info.nnId),
DFSUtil.createUri(HdfsConstants.HDFS_URI_SCHEME,
info.nameNode.getNameNodeAddress()).toString());
}
for (Map.Entry<String, List<String>> entry : nameservices.entrySet()) {
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX,
entry.getKey()), Joiner.on(",").join(entry.getValue()));
conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "."
+ entry.getKey(), ConfiguredFailoverProxyProvider.class.getName());
}
conf.set(DFSConfigKeys.DFS_NAMESERVICES, Joiner.on(",")
.join(nameservices.keySet()));
}
private static DatanodeID getDatanodeID(String ipAddr) {
return new DatanodeID(ipAddr, "localhost",
UUID.randomUUID().toString(),
DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
}
public static DatanodeID getLocalDatanodeID() {
return getDatanodeID("127.0.0.1");
}
public static DatanodeID getLocalDatanodeID(int port) {
return new DatanodeID("127.0.0.1", "localhost",
UUID.randomUUID().toString(),
port, port, port, port);
}
public static DatanodeDescriptor getLocalDatanodeDescriptor() {
return new DatanodeDescriptor(getLocalDatanodeID());
}
public static DatanodeInfo getLocalDatanodeInfo() {
return new DatanodeInfo(getLocalDatanodeID());
}
public static DatanodeInfo getDatanodeInfo(String ipAddr) {
return new DatanodeInfo(getDatanodeID(ipAddr));
}
public static DatanodeInfo getLocalDatanodeInfo(int port) {
return new DatanodeInfo(getLocalDatanodeID(port));
}
public static DatanodeInfo getDatanodeInfo(String ipAddr,
String host, int port) {
return new DatanodeInfo(new DatanodeID(ipAddr, host,
UUID.randomUUID().toString(), port,
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT));
}
public static DatanodeInfo getLocalDatanodeInfo(String ipAddr,
String hostname, AdminStates adminState) {
return new DatanodeInfo(ipAddr, hostname, "",
DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT,
1L, 2L, 3L, 4L, 0L, 0L, 0L, 5, 6, "local", adminState);
}
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
String rackLocation) {
return getDatanodeDescriptor(ipAddr, DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
rackLocation);
}
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
String rackLocation, String hostname) {
return getDatanodeDescriptor(ipAddr,
DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT, rackLocation, hostname);
}
public static DatanodeStorageInfo createDatanodeStorageInfo(
String storageID, String ip) {
return createDatanodeStorageInfo(storageID, ip, "defaultRack", "host");
}
public static DatanodeStorageInfo[] createDatanodeStorageInfos(String[] racks) {
return createDatanodeStorageInfos(racks, null);
}
public static DatanodeStorageInfo[] createDatanodeStorageInfos(String[] racks, String[] hostnames) {
return createDatanodeStorageInfos(racks.length, racks, hostnames);
}
public static DatanodeStorageInfo[] createDatanodeStorageInfos(int n) {
return createDatanodeStorageInfos(n, null, null);
}
public static DatanodeStorageInfo[] createDatanodeStorageInfos(
int n, String[] racks, String[] hostnames) {
return createDatanodeStorageInfos(n, racks, hostnames, null);
}
public static DatanodeStorageInfo[] createDatanodeStorageInfos(
int n, String[] racks, String[] hostnames, StorageType[] types) {
DatanodeStorageInfo[] storages = new DatanodeStorageInfo[n];
for(int i = storages.length; i > 0; ) {
final String storageID = "s" + i;
final String ip = i + "." + i + "." + i + "." + i;
i--;
final String rack = (racks!=null && i < racks.length)? racks[i]: "defaultRack";
final String hostname = (hostnames!=null && i < hostnames.length)? hostnames[i]: "host";
final StorageType type = (types != null && i < types.length) ? types[i]
: StorageType.DEFAULT;
storages[i] = createDatanodeStorageInfo(storageID, ip, rack, hostname,
type);
}
return storages;
}
public static DatanodeStorageInfo createDatanodeStorageInfo(
String storageID, String ip, String rack, String hostname) {
return createDatanodeStorageInfo(storageID, ip, rack, hostname,
StorageType.DEFAULT);
}
public static DatanodeStorageInfo createDatanodeStorageInfo(
String storageID, String ip, String rack, String hostname,
StorageType type) {
final DatanodeStorage storage = new DatanodeStorage(storageID,
DatanodeStorage.State.NORMAL, type);
final DatanodeDescriptor dn = BlockManagerTestUtil.getDatanodeDescriptor(
ip, rack, storage, hostname);
return BlockManagerTestUtil.newDatanodeStorageInfo(dn, storage);
}
public static DatanodeDescriptor[] toDatanodeDescriptor(
DatanodeStorageInfo[] storages) {
DatanodeDescriptor[] datanodes = new DatanodeDescriptor[storages.length];
for(int i = 0; i < datanodes.length; i++) {
datanodes[i] = storages[i].getDatanodeDescriptor();
}
return datanodes;
}
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
int port, String rackLocation, String hostname) {
DatanodeID dnId = new DatanodeID(ipAddr, hostname,
UUID.randomUUID().toString(), port,
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
return new DatanodeDescriptor(dnId, rackLocation);
}
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
int port, String rackLocation) {
return getDatanodeDescriptor(ipAddr, port, rackLocation, "host");
}
public static DatanodeRegistration getLocalDatanodeRegistration() {
return new DatanodeRegistration(getLocalDatanodeID(), new StorageInfo(
NodeType.DATA_NODE), new ExportedBlockKeys(), VersionInfo.getVersion());
}
/** Copy one file's contents into the other **/
public static void copyFile(File src, File dest) throws IOException {
FileUtils.copyFile(src, dest);
}
public static class Builder {
private int maxLevels = 3;
private int maxSize = 8*1024;
private int minSize = 1;
private int nFiles = 1;
public Builder() {
}
public Builder setName(String string) {
return this;
}
public Builder setNumFiles(int nFiles) {
this.nFiles = nFiles;
return this;
}
public Builder setMaxLevels(int maxLevels) {
this.maxLevels = maxLevels;
return this;
}
public Builder setMaxSize(int maxSize) {
this.maxSize = maxSize;
return this;
}
public Builder setMinSize(int minSize) {
this.minSize = minSize;
return this;
}
public DFSTestUtil build() {
return new DFSTestUtil(nFiles, maxLevels, maxSize, minSize);
}
}
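  /**
   * Illustrative sketch only (not part of the original utility): the Builder
   * driven create/verify/cleanup cycle. The target directory is an arbitrary
   * assumption.
   */
  private static void exampleBuilderSketch(FileSystem fs) throws IOException {
    DFSTestUtil util = new DFSTestUtil.Builder()
        .setNumFiles(10)
        .setMaxLevels(2)
        .setMaxSize(4096)
        .setMinSize(1)
        .build();
    util.createFiles(fs, "/exampleTopDir");            // default replication 3
    assertTrue(util.checkFiles(fs, "/exampleTopDir")); // contents are reproducible from the seeds
    util.cleanup(fs, "/exampleTopDir");
  }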
/**
* Run a set of operations and generate all edit logs
*/
public static void runOperations(MiniDFSCluster cluster,
DistributedFileSystem filesystem, Configuration conf, long blockSize,
int nnIndex) throws IOException {
// create FileContext for rename2
FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
// OP_ADD 0
final Path pathFileCreate = new Path("/file_create");
FSDataOutputStream s = filesystem.create(pathFileCreate);
// OP_CLOSE 9
s.close();
// OP_APPEND 47
FSDataOutputStream s2 = filesystem.append(pathFileCreate, 4096, null);
s2.close();
// OP_SET_STORAGE_POLICY 45
filesystem.setStoragePolicy(pathFileCreate,
HdfsConstants.HOT_STORAGE_POLICY_NAME);
// OP_RENAME_OLD 1
final Path pathFileMoved = new Path("/file_moved");
filesystem.rename(pathFileCreate, pathFileMoved);
// OP_DELETE 2
filesystem.delete(pathFileMoved, false);
// OP_MKDIR 3
Path pathDirectoryMkdir = new Path("/directory_mkdir");
filesystem.mkdirs(pathDirectoryMkdir);
// OP_ALLOW_SNAPSHOT 29
filesystem.allowSnapshot(pathDirectoryMkdir);
// OP_DISALLOW_SNAPSHOT 30
filesystem.disallowSnapshot(pathDirectoryMkdir);
// OP_CREATE_SNAPSHOT 26
String ssName = "snapshot1";
filesystem.allowSnapshot(pathDirectoryMkdir);
filesystem.createSnapshot(pathDirectoryMkdir, ssName);
// OP_RENAME_SNAPSHOT 28
String ssNewName = "snapshot2";
filesystem.renameSnapshot(pathDirectoryMkdir, ssName, ssNewName);
// OP_DELETE_SNAPSHOT 27
filesystem.deleteSnapshot(pathDirectoryMkdir, ssNewName);
// OP_SET_REPLICATION 4
s = filesystem.create(pathFileCreate);
s.close();
filesystem.setReplication(pathFileCreate, (short)1);
// OP_SET_PERMISSIONS 7
Short permission = 0777;
filesystem.setPermission(pathFileCreate, new FsPermission(permission));
// OP_SET_OWNER 8
filesystem.setOwner(pathFileCreate, "newOwner", null);
// OP_CLOSE 9 see above
// OP_SET_GENSTAMP 10 see above
// OP_SET_NS_QUOTA 11 obsolete
// OP_CLEAR_NS_QUOTA 12 obsolete
// OP_TIMES 13
long mtime = 1285195527000L; // Wed, 22 Sep 2010 22:45:27 GMT
long atime = mtime;
filesystem.setTimes(pathFileCreate, mtime, atime);
// OP_SET_QUOTA 14
filesystem.setQuota(pathDirectoryMkdir, 1000L,
HdfsConstants.QUOTA_DONT_SET);
// OP_SET_QUOTA_BY_STORAGETYPE
filesystem.setQuotaByStorageType(pathDirectoryMkdir, StorageType.SSD, 888L);
// OP_RENAME 15
fc.rename(pathFileCreate, pathFileMoved, Rename.NONE);
// OP_CONCAT_DELETE 16
Path pathConcatTarget = new Path("/file_concat_target");
Path[] pathConcatFiles = new Path[2];
pathConcatFiles[0] = new Path("/file_concat_0");
pathConcatFiles[1] = new Path("/file_concat_1");
long length = blockSize * 3; // multiple of blocksize for concat
short replication = 1;
long seed = 1;
DFSTestUtil.createFile(filesystem, pathConcatTarget, length, replication,
seed);
DFSTestUtil.createFile(filesystem, pathConcatFiles[0], length, replication,
seed);
DFSTestUtil.createFile(filesystem, pathConcatFiles[1], length, replication,
seed);
filesystem.concat(pathConcatTarget, pathConcatFiles);
// OP_TRUNCATE 46
length = blockSize * 2;
DFSTestUtil.createFile(filesystem, pathFileCreate, length, replication,
seed);
filesystem.truncate(pathFileCreate, blockSize);
// OP_SYMLINK 17
Path pathSymlink = new Path("/file_symlink");
fc.createSymlink(pathConcatTarget, pathSymlink, false);
// OP_REASSIGN_LEASE 22
String filePath = "/hard-lease-recovery-test";
byte[] bytes = "foo-bar-baz".getBytes();
DFSClientAdapter.stopLeaseRenewer(filesystem);
FSDataOutputStream leaseRecoveryPath = filesystem.create(new Path(filePath));
leaseRecoveryPath.write(bytes);
leaseRecoveryPath.hflush();
// Set the hard lease timeout to 1 second.
cluster.setLeasePeriod(60 * 1000, 1000, nnIndex);
// wait for lease recovery to complete
LocatedBlocks locatedBlocks;
do {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {}
locatedBlocks = DFSClientAdapter.callGetBlockLocations(
cluster.getNameNodeRpc(nnIndex), filePath, 0L, bytes.length);
} while (locatedBlocks.isUnderConstruction());
// OP_ADD_CACHE_POOL
filesystem.addCachePool(new CachePoolInfo("pool1"));
// OP_MODIFY_CACHE_POOL
filesystem.modifyCachePool(new CachePoolInfo("pool1").setLimit(99l));
// OP_ADD_PATH_BASED_CACHE_DIRECTIVE
long id = filesystem.addCacheDirective(
new CacheDirectiveInfo.Builder().
setPath(new Path("/path")).
setReplication((short)1).
setPool("pool1").
build(), EnumSet.of(CacheFlag.FORCE));
// OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE
filesystem.modifyCacheDirective(
new CacheDirectiveInfo.Builder().
setId(id).
setReplication((short)2).
build(), EnumSet.of(CacheFlag.FORCE));
// OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE
filesystem.removeCacheDirective(id);
// OP_REMOVE_CACHE_POOL
filesystem.removeCachePool("pool1");
// OP_SET_ACL
List<AclEntry> aclEntryList = Lists.newArrayList();
aclEntryList.add(
new AclEntry.Builder()
.setPermission(FsAction.READ_WRITE)
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER)
.build());
aclEntryList.add(
new AclEntry.Builder()
.setName("user")
.setPermission(FsAction.READ_WRITE)
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.USER)
.build());
aclEntryList.add(
new AclEntry.Builder()
.setPermission(FsAction.WRITE)
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.GROUP)
.build());
aclEntryList.add(
new AclEntry.Builder()
.setPermission(FsAction.NONE)
.setScope(AclEntryScope.ACCESS)
.setType(AclEntryType.OTHER)
.build());
filesystem.setAcl(pathConcatTarget, aclEntryList);
// OP_SET_XATTR
filesystem.setXAttr(pathConcatTarget, "user.a1",
new byte[]{0x31, 0x32, 0x33});
filesystem.setXAttr(pathConcatTarget, "user.a2",
new byte[]{0x37, 0x38, 0x39});
// OP_REMOVE_XATTR
filesystem.removeXAttr(pathConcatTarget, "user.a2");
}
public static void abortStream(DFSOutputStream out) throws IOException {
out.abort();
}
public static byte[] asArray(ByteBuffer buf) {
byte arr[] = new byte[buf.remaining()];
buf.duplicate().get(arr);
return arr;
}
/**
* Blocks until cache usage hits the expected new value.
*/
public static long verifyExpectedCacheUsage(final long expectedCacheUsed,
final long expectedBlocks, final FsDatasetSpi<?> fsd) throws Exception {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
private int tries = 0;
@Override
public Boolean get() {
long curCacheUsed = fsd.getCacheUsed();
long curBlocks = fsd.getNumBlocksCached();
if ((curCacheUsed != expectedCacheUsed) ||
(curBlocks != expectedBlocks)) {
if (tries++ > 10) {
LOG.info("verifyExpectedCacheUsage: have " +
curCacheUsed + "/" + expectedCacheUsed + " bytes cached; " +
curBlocks + "/" + expectedBlocks + " blocks cached. " +
"memlock limit = " +
NativeIO.POSIX.getCacheManipulator().getMemlockLimit() +
". Waiting...");
}
return false;
}
LOG.info("verifyExpectedCacheUsage: got " +
curCacheUsed + "/" + expectedCacheUsed + " bytes cached; " +
curBlocks + "/" + expectedBlocks + " blocks cached. " +
"memlock limit = " +
NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
return true;
}
}, 100, 60000);
return expectedCacheUsed;
}
/**
* Round a long value up to a multiple of a factor.
*
* @param val The value.
* @param factor The factor to round up to. Must be > 1.
* @return The rounded value.
*/
public static long roundUpToMultiple(long val, int factor) {
assert (factor > 1);
long c = (val + factor - 1) / factor;
return c * factor;
}
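  /**
   * Illustrative sketch only (not part of the original utility): for example,
   * roundUpToMultiple(10, 4) == 12 and roundUpToMultiple(12, 4) == 12.
   */
  private static void exampleRoundUpSketch() {
    assert roundUpToMultiple(10, 4) == 12; // rounded up to the next multiple
    assert roundUpToMultiple(12, 4) == 12; // exact multiples are unchanged
  }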
public static void checkComponentsEquals(byte[][] expected, byte[][] actual) {
assertEquals("expected: " + DFSUtil.byteArray2PathString(expected)
+ ", actual: " + DFSUtil.byteArray2PathString(actual), expected.length,
actual.length);
int i = 0;
for (byte[] e : expected) {
byte[] actualComponent = actual[i++];
assertTrue("expected: " + DFSUtil.bytes2String(e) + ", actual: "
+ DFSUtil.bytes2String(actualComponent),
Arrays.equals(e, actualComponent));
}
}
/**
* A short-circuit test context which makes it easier to get a short-circuit
* configuration and set everything up.
*/
public static class ShortCircuitTestContext implements Closeable {
private final String testName;
private final TemporarySocketDirectory sockDir;
private boolean closed = false;
private final boolean formerTcpReadsDisabled;
public ShortCircuitTestContext(String testName) {
this.testName = testName;
this.sockDir = new TemporarySocketDirectory();
DomainSocket.disableBindPathValidation();
formerTcpReadsDisabled = DFSInputStream.tcpReadsDisabledForTesting;
Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
}
public Configuration newConfiguration() {
Configuration conf = new Configuration();
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
new File(sockDir.getDir(),
testName + "._PORT.sock").getAbsolutePath());
return conf;
}
public String getTestName() {
return testName;
}
public void close() throws IOException {
if (closed) return;
closed = true;
DFSInputStream.tcpReadsDisabledForTesting = formerTcpReadsDisabled;
sockDir.close();
}
}
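  /**
   * Illustrative sketch only (not part of the original utility): typical
   * lifecycle of a ShortCircuitTestContext. The test name is an arbitrary
   * assumption.
   */
  private static void exampleShortCircuitSketch() throws IOException {
    try (ShortCircuitTestContext ctx = new ShortCircuitTestContext("ExampleTest")) {
      Configuration conf = ctx.newConfiguration(); // short-circuit reads enabled
      // ... build a MiniDFSCluster with conf and run the test body ...
    } // close() restores tcpReadsDisabledForTesting and removes the socket dir
  }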
/**
* Verify that two files have the same contents.
*
* @param fs The file system containing the two files.
* @param p1 The path of the first file.
* @param p2 The path of the second file.
* @param len The length of the two files.
* @throws IOException
*/
public static void verifyFilesEqual(FileSystem fs, Path p1, Path p2, int len)
throws IOException {
final FSDataInputStream in1 = fs.open(p1);
final FSDataInputStream in2 = fs.open(p2);
for (int i = 0; i < len; i++) {
assertEquals("Mismatch at byte " + i, in1.read(), in2.read());
}
in1.close();
in2.close();
}
/**
* Verify that two files have different contents.
*
* @param fs The file system containing the two files.
* @param p1 The path of the first file.
* @param p2 The path of the second file.
* @param len The length of the two files.
* @throws IOException
*/
public static void verifyFilesNotEqual(FileSystem fs, Path p1, Path p2,
int len)
throws IOException {
final FSDataInputStream in1 = fs.open(p1);
final FSDataInputStream in2 = fs.open(p2);
try {
for (int i = 0; i < len; i++) {
if (in1.read() != in2.read()) {
return;
}
}
fail("files are equal, but should not be");
} finally {
in1.close();
in2.close();
}
}
/**
* Helper function that verifies blocks of a file are placed on the
* expected storage type.
*
* @param fs The file system containing the file.
* @param client The DFS client used to access the file
* @param path name to the file to verify
* @param storageType expected storage type
* @return true if file exists and its blocks are located on the expected
* storage type.
* false otherwise.
*/
public static boolean verifyFileReplicasOnStorageType(FileSystem fs,
DFSClient client, Path path, StorageType storageType) throws IOException {
if (!fs.exists(path)) {
LOG.info("verifyFileReplicasOnStorageType: file " + path + "does not exist");
return false;
}
long fileLength = client.getFileInfo(path.toString()).getLen();
LocatedBlocks locatedBlocks =
client.getLocatedBlocks(path.toString(), 0, fileLength);
for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
if (locatedBlock.getStorageTypes()[0] != storageType) {
LOG.info("verifyFileReplicasOnStorageType: for file " + path +
". Expect blk" + locatedBlock +
" on Type: " + storageType + ". Actual Type: " +
locatedBlock.getStorageTypes()[0]);
return false;
}
}
return true;
}
/**
* Helper function to create a key in the Key Provider. Defaults
* to the first indexed NameNode's Key Provider.
*
* @param keyName The name of the key to create
* @param cluster The cluster to create it in
* @param conf Configuration to use
*/
public static void createKey(String keyName, MiniDFSCluster cluster,
Configuration conf)
throws NoSuchAlgorithmException, IOException {
createKey(keyName, cluster, 0, conf);
}
/**
* Helper function to create a key in the Key Provider.
*
* @param keyName The name of the key to create
* @param cluster The cluster to create it in
* @param idx The NameNode index
* @param conf Configuration to use
*/
public static void createKey(String keyName, MiniDFSCluster cluster,
int idx, Configuration conf)
throws NoSuchAlgorithmException, IOException {
NameNode nn = cluster.getNameNode(idx);
KeyProvider provider = nn.getNamesystem().getProvider();
final KeyProvider.Options options = KeyProvider.options(conf);
options.setDescription(keyName);
options.setBitLength(128);
provider.createKey(keyName, options);
provider.flush();
}
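  /**
   * Illustrative sketch only (not part of the original utility): create an
   * encryption key against the first NameNode's key provider. The key name is
   * an arbitrary assumption.
   */
  private static void exampleCreateKeySketch(MiniDFSCluster cluster, Configuration conf)
      throws NoSuchAlgorithmException, IOException {
    createKey("exampleKey", cluster, conf); // equivalent to createKey("exampleKey", cluster, 0, conf)
  }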
/**
* @return the node which is expected to run the recovery of the
* given block, which is known to be under construction inside the
* given NameNode.
*/
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
ExtendedBlock blk) {
BlockManager bm0 = nn.getNamesystem().getBlockManager();
BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
assertTrue("Block " + blk + " should be under construction, " +
"got: " + storedBlock,
storedBlock instanceof BlockInfoContiguousUnderConstruction);
BlockInfoContiguousUnderConstruction ucBlock =
(BlockInfoContiguousUnderConstruction)storedBlock;
// We expect that the replica with the most recent heart beat will be
// the one to be in charge of the synchronization / recovery protocol.
final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
DatanodeStorageInfo expectedPrimary = storages[0];
long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor()
.getLastUpdateMonotonic();
for (int i = 1; i < storages.length; i++) {
final long lastUpdate = storages[i].getDatanodeDescriptor()
.getLastUpdateMonotonic();
if (lastUpdate > mostRecentLastUpdate) {
expectedPrimary = storages[i];
mostRecentLastUpdate = lastUpdate;
}
}
return expectedPrimary.getDatanodeDescriptor();
}
public static void toolRun(Tool tool, String cmd, int retcode, String contain)
throws Exception {
String [] cmds = StringUtils.split(cmd, ' ');
System.out.flush();
System.err.flush();
PrintStream origOut = System.out;
PrintStream origErr = System.err;
String output = null;
int ret = 0;
try {
ByteArrayOutputStream bs = new ByteArrayOutputStream(1024);
PrintStream out = new PrintStream(bs);
System.setOut(out);
System.setErr(out);
ret = tool.run(cmds);
System.out.flush();
System.err.flush();
out.close();
output = bs.toString();
} finally {
System.setOut(origOut);
System.setErr(origErr);
}
System.out.println("Output for command: " + cmd + " retcode: " + ret);
if (output != null) {
System.out.println(output);
}
assertEquals(retcode, ret);
if (contain != null) {
assertTrue("The real output is: " + output + ".\n It should contain: "
+ contain, output.contains(contain));
}
}
public static void FsShellRun(String cmd, int retcode, String contain,
Configuration conf) throws Exception {
FsShell shell = new FsShell(new Configuration(conf));
toolRun(shell, cmd, retcode, contain);
}
public static void DFSAdminRun(String cmd, int retcode, String contain,
Configuration conf) throws Exception {
DFSAdmin admin = new DFSAdmin(new Configuration(conf));
toolRun(admin, cmd, retcode, contain);
}
public static void FsShellRun(String cmd, Configuration conf)
throws Exception {
FsShellRun(cmd, 0, null, conf);
}
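  /**
   * Minimal sketch of driving the CLI helpers above from a test; illustrative
   * only. The paths, the expected-output substrings and the assumption that
   * safe mode is off are examples, and conf is expected to point at a running
   * cluster.
   */
  private static void shellHelpersUsageSketch(Configuration conf)
      throws Exception {
    // Run an FsShell command expecting success (exit code 0, no output check).
    FsShellRun("-mkdir -p /sketch/dir", conf);
    // Run another command expecting a specific exit code and output fragment.
    FsShellRun("-ls /sketch", 0, "/sketch/dir", conf);
    // DFSAdmin commands go through the same capture-and-assert path.
    DFSAdminRun("-safemode get", 0, "Safe mode is OFF", conf);
  }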
public static void addDataNodeLayoutVersion(final int lv, final String description)
throws NoSuchFieldException, IllegalAccessException {
Preconditions.checkState(lv < DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
// Override {@link DataNodeLayoutVersion#CURRENT_LAYOUT_VERSION} via reflection.
Field modifiersField = Field.class.getDeclaredField("modifiers");
modifiersField.setAccessible(true);
Field field = DataNodeLayoutVersion.class.getField("CURRENT_LAYOUT_VERSION");
field.setAccessible(true);
modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
field.setInt(null, lv);
field = HdfsServerConstants.class.getField("DATANODE_LAYOUT_VERSION");
field.setAccessible(true);
modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
field.setInt(null, lv);
// Inject the feature into the FEATURES map.
final LayoutVersion.FeatureInfo featureInfo =
new LayoutVersion.FeatureInfo(lv, lv + 1, description, false);
final LayoutVersion.LayoutFeature feature =
new LayoutVersion.LayoutFeature() {
@Override
public LayoutVersion.FeatureInfo getInfo() {
return featureInfo;
}
};
// Update the FEATURES map with the new layout version.
LayoutVersion.updateMap(DataNodeLayoutVersion.FEATURES,
new LayoutVersion.LayoutFeature[] { feature });
}
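  /**
   * Sketch of how a layout-version test might use the reflection helper above;
   * illustrative only. The layout version value and description are made up,
   * and note that the call permanently alters static state for the rest of the
   * JVM's lifetime.
   */
  private static void layoutVersionOverrideSketch()
      throws NoSuchFieldException, IllegalAccessException {
    // Pretend the datanode software supports a newer (more negative) layout
    // version than the current build advertises.
    addDataNodeLayoutVersion(
        DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1,
        "Sketch-only feature used to exercise layout version handling");
  }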
/**
* Wait for datanode to reach alive or dead state for waitTime given in
* milliseconds.
*/
public static void waitForDatanodeState(
final MiniDFSCluster cluster, final String nodeID,
final boolean alive, int waitTime)
throws TimeoutException, InterruptedException {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
FSNamesystem namesystem = cluster.getNamesystem();
final DatanodeDescriptor dd = BlockManagerTestUtil.getDatanode(
namesystem, nodeID);
return (dd.isAlive == alive);
}
}, 100, waitTime);
}
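  /**
   * Illustrative sketch, not invoked here: waiting for a restarted datanode to
   * be marked alive again. The datanode UUID parameter and the 20 second
   * timeout are assumptions of the example.
   */
  private static void waitForDatanodeAliveSketch(MiniDFSCluster cluster,
      String datanodeUuid) throws TimeoutException, InterruptedException {
    // Block until the NameNode's heartbeat tracking sees the node as alive,
    // or fail after 20 seconds.
    waitForDatanodeState(cluster, datanodeUuid, true, 20000);
  }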
public static void setNameNodeLogLevel(Level level) {
GenericTestUtils.setLogLevel(FSNamesystem.LOG, level);
GenericTestUtils.setLogLevel(BlockManager.LOG, level);
GenericTestUtils.setLogLevel(LeaseManager.LOG, level);
GenericTestUtils.setLogLevel(NameNode.LOG, level);
GenericTestUtils.setLogLevel(NameNode.stateChangeLog, level);
GenericTestUtils.setLogLevel(NameNode.blockStateChangeLog, level);
}
/**
* Change the length of a block at datanode dnIndex
*/
public static boolean changeReplicaLength(MiniDFSCluster cluster,
ExtendedBlock blk, int dnIndex, int lenDelta) throws IOException {
File blockFile = cluster.getBlockFile(dnIndex, blk);
if (blockFile != null && blockFile.exists()) {
RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
raFile.setLength(raFile.length()+lenDelta);
raFile.close();
return true;
}
LOG.info("failed to change length of block " + blk);
return false;
}
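  /**
   * Sketch only: shrinking a replica's on-disk length, e.g. to exercise the
   * NameNode's corrupt-replica handling. The one-byte truncation and the
   * choice of datanode index 0 are assumptions of the example.
   */
  private static boolean truncateReplicaSketch(MiniDFSCluster cluster,
      ExtendedBlock blk) throws IOException {
    // Shrink the replica on datanode 0 by one byte; returns false if that
    // datanode does not hold a finalized copy of the block.
    return changeReplicaLength(cluster, blk, 0, -1);
  }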
/**
   * Get the NamenodeProtocol RPC proxy for the NN at the given URI.
   *
   * @param nameNodeUri the URI of the NN to get a proxy for.
   *
   * @return the NamenodeProtocol RPC proxy for that NN
*/
@VisibleForTesting
public static NamenodeProtocol getNamenodeProtocolProxy(Configuration conf,
URI nameNodeUri, UserGroupInformation ugi)
throws IOException {
return NameNodeProxies.createNonHAProxy(conf,
NameNode.getAddress(nameNodeUri), NamenodeProtocol.class, ugi, false).
getProxy();
}
/**
   * Get the RefreshUserMappingsProtocol RPC proxy for the NN at the given URI.
   *
   * @param nameNodeUri the URI of the NN to get a proxy for.
   *
   * @return the RefreshUserMappingsProtocol RPC proxy for that NN
*/
@VisibleForTesting
public static RefreshUserMappingsProtocol getRefreshUserMappingsProtocolProxy(
Configuration conf, URI nameNodeUri) throws IOException {
final AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
return NameNodeProxies.createProxy(conf,
nameNodeUri, RefreshUserMappingsProtocol.class,
nnFallbackToSimpleAuth).getProxy();
}
/**
* Set the datanode dead
*/
public static void setDatanodeDead(DatanodeInfo dn) {
dn.setLastUpdate(0);
dn.setLastUpdateMonotonic(0);
}
/**
* Update lastUpdate and lastUpdateMonotonic with some offset.
*/
public static void resetLastUpdatesWithOffset(DatanodeInfo dn, long offset) {
dn.setLastUpdate(Time.now() + offset);
dn.setLastUpdateMonotonic(Time.monotonicNow() + offset);
}
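  /**
   * Illustrative sketch of the heartbeat helpers above: making a node look
   * stale versus dead. The 40 second staleness offset is an assumption of the
   * example, not a value used by the tests here.
   */
  private static void heartbeatAgeSketch(DatanodeInfo dn) {
    // Pretend the last heartbeat arrived 40 seconds ago (typically enough to
    // be considered stale under default settings, but not dead).
    resetLastUpdatesWithOffset(dn, -40 * 1000);
    // Zero both timestamps so heartbeat checks treat the node as dead.
    setDatanodeDead(dn);
  }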
/**
* This method takes a set of block locations and fills the provided buffer
* with expected bytes based on simulated content from
* {@link SimulatedFSDataset}.
*
* @param lbs The block locations of a file
* @param expected The buffer to be filled with expected bytes on the above
* locations.
*/
public static void fillExpectedBuf(LocatedBlocks lbs, byte[] expected) {
Block[] blks = new Block[lbs.getLocatedBlocks().size()];
for (int i = 0; i < lbs.getLocatedBlocks().size(); i++) {
blks[i] = lbs.getLocatedBlocks().get(i).getBlock().getLocalBlock();
}
int bufPos = 0;
for (Block b : blks) {
for (long blkPos = 0; blkPos < b.getNumBytes(); blkPos++) {
assert bufPos < expected.length;
expected[bufPos++] = SimulatedFSDataset.simulatedByte(b, blkPos);
}
}
}
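  /**
   * Sketch of verifying simulated data with fillExpectedBuf; illustrative
   * only. The caller is assumed to have read the same byte range of the file
   * into "actual" that the given block locations cover.
   */
  private static boolean matchesSimulatedContent(LocatedBlocks lbs,
      byte[] actual) {
    byte[] expected = new byte[actual.length];
    // Reconstruct the bytes SimulatedFSDataset would have served for these blocks.
    fillExpectedBuf(lbs, expected);
    return java.util.Arrays.equals(expected, actual);
  }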
}
| 66,963 | 34.809626 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.util.Random;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.log4j.Level;
import org.junit.Test;
/**
* A JUnit test for checking if restarting DFS preserves the
* blocks that are part of an unclosed file.
*/
public class TestPersistBlocks {
static {
((Log4JLogger)FSImage.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
}
private static final int BLOCK_SIZE = 4096;
private static final int NUM_BLOCKS = 5;
private static final String FILE_NAME = "/data";
private static final Path FILE_PATH = new Path(FILE_NAME);
static final byte[] DATA_BEFORE_RESTART = new byte[BLOCK_SIZE * NUM_BLOCKS];
static final byte[] DATA_AFTER_RESTART = new byte[BLOCK_SIZE * NUM_BLOCKS];
private static final String HADOOP_1_0_MULTIBLOCK_TGZ =
"hadoop-1.0-multiblock-file.tgz";
static {
Random rand = new Random();
rand.nextBytes(DATA_BEFORE_RESTART);
rand.nextBytes(DATA_AFTER_RESTART);
}
/** check if DFS remains in proper condition after a restart */
@Test
public void testRestartDfs() throws Exception {
final Configuration conf = new HdfsConfiguration();
// Turn off persistent IPC, so that the DFSClient can survive NN restart
conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
0);
MiniDFSCluster cluster = null;
long len = 0;
FSDataOutputStream stream;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = cluster.getFileSystem();
// Creating a file with 4096 blockSize to write multiple blocks
stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
stream.write(DATA_BEFORE_RESTART);
stream.hflush();
// Wait for at least a few blocks to get through
while (len <= BLOCK_SIZE) {
FileStatus status = fs.getFileStatus(FILE_PATH);
len = status.getLen();
Thread.sleep(100);
}
// explicitly do NOT close the file.
cluster.restartNameNode();
      // Check that the file has no fewer bytes than before the restart
// This would mean that blocks were successfully persisted to the log
FileStatus status = fs.getFileStatus(FILE_PATH);
assertTrue("Length too short: " + status.getLen(),
status.getLen() >= len);
// And keep writing (ensures that leases are also persisted correctly)
stream.write(DATA_AFTER_RESTART);
stream.close();
// Verify that the data showed up, both from before and after the restart.
FSDataInputStream readStream = fs.open(FILE_PATH);
try {
byte[] verifyBuf = new byte[DATA_BEFORE_RESTART.length];
IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
assertArrayEquals(DATA_BEFORE_RESTART, verifyBuf);
IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
assertArrayEquals(DATA_AFTER_RESTART, verifyBuf);
} finally {
IOUtils.closeStream(readStream);
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
@Test
public void testRestartDfsWithAbandonedBlock() throws Exception {
final Configuration conf = new HdfsConfiguration();
// Turn off persistent IPC, so that the DFSClient can survive NN restart
conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
0);
MiniDFSCluster cluster = null;
long len = 0;
FSDataOutputStream stream;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = cluster.getFileSystem();
// Creating a file with 4096 blockSize to write multiple blocks
stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
stream.write(DATA_BEFORE_RESTART);
stream.hflush();
// Wait for all of the blocks to get through
while (len < BLOCK_SIZE * (NUM_BLOCKS - 1)) {
FileStatus status = fs.getFileStatus(FILE_PATH);
len = status.getLen();
Thread.sleep(100);
}
// Abandon the last block
DFSClient dfsclient = DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
HdfsFileStatus fileStatus = dfsclient.getNamenode().getFileInfo(FILE_NAME);
LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(
FILE_NAME, 0, BLOCK_SIZE * NUM_BLOCKS);
assertEquals(NUM_BLOCKS, blocks.getLocatedBlocks().size());
LocatedBlock b = blocks.getLastLocatedBlock();
dfsclient.getNamenode().abandonBlock(b.getBlock(), fileStatus.getFileId(),
FILE_NAME, dfsclient.clientName);
// explicitly do NOT close the file.
cluster.restartNameNode();
      // Check that the file has no fewer bytes than before the restart
// This would mean that blocks were successfully persisted to the log
FileStatus status = fs.getFileStatus(FILE_PATH);
assertTrue("Length incorrect: " + status.getLen(),
status.getLen() == len - BLOCK_SIZE);
// Verify the data showed up from before restart, sans abandoned block.
FSDataInputStream readStream = fs.open(FILE_PATH);
try {
byte[] verifyBuf = new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
byte[] expectedBuf = new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
System.arraycopy(DATA_BEFORE_RESTART, 0,
expectedBuf, 0, expectedBuf.length);
assertArrayEquals(expectedBuf, verifyBuf);
} finally {
IOUtils.closeStream(readStream);
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
@Test
public void testRestartWithPartialBlockHflushed() throws IOException {
final Configuration conf = new HdfsConfiguration();
// Turn off persistent IPC, so that the DFSClient can survive NN restart
conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
0);
MiniDFSCluster cluster = null;
FSDataOutputStream stream;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = cluster.getFileSystem();
NameNode.getAddress(conf).getPort();
// Creating a file with 4096 blockSize to write multiple blocks
stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
stream.write(DATA_BEFORE_RESTART);
stream.write((byte)1);
stream.hflush();
// explicitly do NOT close the file before restarting the NN.
cluster.restartNameNode();
// this will fail if the final block of the file is prematurely COMPLETEd
stream.write((byte)2);
stream.hflush();
stream.close();
assertEquals(DATA_BEFORE_RESTART.length + 2,
fs.getFileStatus(FILE_PATH).getLen());
FSDataInputStream readStream = fs.open(FILE_PATH);
try {
byte[] verifyBuf = new byte[DATA_BEFORE_RESTART.length + 2];
IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
byte[] expectedBuf = new byte[DATA_BEFORE_RESTART.length + 2];
System.arraycopy(DATA_BEFORE_RESTART, 0, expectedBuf, 0,
DATA_BEFORE_RESTART.length);
System.arraycopy(new byte[]{1, 2}, 0, expectedBuf,
DATA_BEFORE_RESTART.length, 2);
assertArrayEquals(expectedBuf, verifyBuf);
} finally {
IOUtils.closeStream(readStream);
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
@Test
public void testRestartWithAppend() throws IOException {
final Configuration conf = new HdfsConfiguration();
// Turn off persistent IPC, so that the DFSClient can survive NN restart
conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
0);
MiniDFSCluster cluster = null;
FSDataOutputStream stream;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = cluster.getFileSystem();
NameNode.getAddress(conf).getPort();
// Creating a file with 4096 blockSize to write multiple blocks
stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
stream.write(DATA_BEFORE_RESTART, 0, DATA_BEFORE_RESTART.length / 2);
stream.close();
stream = fs.append(FILE_PATH, BLOCK_SIZE);
stream.write(DATA_BEFORE_RESTART, DATA_BEFORE_RESTART.length / 2,
DATA_BEFORE_RESTART.length / 2);
stream.close();
assertEquals(DATA_BEFORE_RESTART.length,
fs.getFileStatus(FILE_PATH).getLen());
cluster.restartNameNode();
assertEquals(DATA_BEFORE_RESTART.length,
fs.getFileStatus(FILE_PATH).getLen());
FSDataInputStream readStream = fs.open(FILE_PATH);
try {
byte[] verifyBuf = new byte[DATA_BEFORE_RESTART.length];
IOUtils.readFully(readStream, verifyBuf, 0, verifyBuf.length);
assertArrayEquals(DATA_BEFORE_RESTART, verifyBuf);
} finally {
IOUtils.closeStream(readStream);
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
/**
* Earlier versions of HDFS didn't persist block allocation to the edit log.
* This makes sure that we can still load an edit log when the OP_CLOSE
* is the opcode which adds all of the blocks. This is a regression
* test for HDFS-2773.
* This test uses a tarred pseudo-distributed cluster from Hadoop 1.0
* which has a multi-block file. This is similar to the tests in
* {@link TestDFSUpgradeFromImage} but none of those images include
* a multi-block file.
*/
@Test
public void testEarlierVersionEditLog() throws Exception {
final Configuration conf = new HdfsConfiguration();
String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+ "/" + HADOOP_1_0_MULTIBLOCK_TGZ;
String testDir = PathUtils.getTestDirName(getClass());
File dfsDir = new File(testDir, "image-1.0");
if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
}
FileUtil.unTar(new File(tarFile), new File(testDir));
File nameDir = new File(dfsDir, "name");
GenericTestUtils.assertExists(nameDir);
File dataDir = new File(dfsDir, "data");
GenericTestUtils.assertExists(dataDir);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir.getAbsolutePath());
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.numDataNodes(1)
.startupOption(StartupOption.UPGRADE)
.build();
try {
FileSystem fs = cluster.getFileSystem();
Path testPath = new Path("/user/todd/4blocks");
// Read it without caring about the actual data within - we just need
// to make sure that the block states and locations are OK.
DFSTestUtil.readFile(fs, testPath);
// Ensure that we can append to it - if the blocks were in some funny
// state we'd get some kind of issue here.
FSDataOutputStream stm = fs.append(testPath);
try {
stm.write(1);
} finally {
IOUtils.closeStream(stm);
}
} finally {
cluster.shutdown();
}
}
}
| 13,540 | 37.91092 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.VersionInfo;
import org.junit.Test;
import java.net.InetSocketAddress;
import java.security.Permission;
import static org.junit.Assert.*;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
/**
 * This class tests datanode registration and handshake with the namenode.
*/
public class TestDatanodeRegistration {
public static final Log LOG = LogFactory.getLog(TestDatanodeRegistration.class);
private static class MonitorDNS extends SecurityManager {
int lookups = 0;
@Override
public void checkPermission(Permission perm) {}
@Override
public void checkConnect(String host, int port) {
if (port == -1) {
lookups++;
}
}
}
/**
* Ensure the datanode manager does not do host lookup after registration,
* especially for node reports.
* @throws Exception
*/
@Test
public void testDNSLookups() throws Exception {
MonitorDNS sm = new MonitorDNS();
System.setSecurityManager(sm);
MiniDFSCluster cluster = null;
try {
HdfsConfiguration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(8).build();
cluster.waitActive();
int initialLookups = sm.lookups;
assertTrue("dns security manager is active", initialLookups != 0);
DatanodeManager dm =
cluster.getNamesystem().getBlockManager().getDatanodeManager();
// make sure no lookups occur
dm.refreshNodes(conf);
assertEquals(initialLookups, sm.lookups);
dm.refreshNodes(conf);
assertEquals(initialLookups, sm.lookups);
// ensure none of the reports trigger lookups
dm.getDatanodeListForReport(DatanodeReportType.ALL);
assertEquals(initialLookups, sm.lookups);
dm.getDatanodeListForReport(DatanodeReportType.LIVE);
assertEquals(initialLookups, sm.lookups);
dm.getDatanodeListForReport(DatanodeReportType.DEAD);
assertEquals(initialLookups, sm.lookups);
} finally {
if (cluster != null) {
cluster.shutdown();
}
System.setSecurityManager(null);
}
}
/**
 * Regression test for HDFS-894: ensures that, when datanodes
* are restarted, the new IPC port is registered with the
* namenode.
*/
@Test
public void testChangeIpcPort() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).build();
InetSocketAddress addr = new InetSocketAddress(
"localhost",
cluster.getNameNodePort());
DFSClient client = new DFSClient(addr, conf);
// Restart datanodes
cluster.restartDataNodes();
// Wait until we get a heartbeat from the new datanode
DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
long firstUpdateAfterRestart = report[0].getLastUpdate();
boolean gotHeartbeat = false;
for (int i = 0; i < 10 && !gotHeartbeat; i++) {
try {
Thread.sleep(i*1000);
} catch (InterruptedException ie) {}
report = client.datanodeReport(DatanodeReportType.ALL);
gotHeartbeat = (report[0].getLastUpdate() > firstUpdateAfterRestart);
}
if (!gotHeartbeat) {
fail("Never got a heartbeat from restarted datanode.");
}
int realIpcPort = cluster.getDataNodes().get(0).getIpcPort();
// Now make sure the reported IPC port is the correct one.
assertEquals(realIpcPort, report[0].getIpcPort());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testChangeStorageID() throws Exception {
final String DN_IP_ADDR = "127.0.0.1";
final String DN_HOSTNAME = "localhost";
final int DN_XFER_PORT = 12345;
final int DN_INFO_PORT = 12346;
final int DN_INFO_SECURE_PORT = 12347;
final int DN_IPC_PORT = 12348;
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0)
.build();
InetSocketAddress addr = new InetSocketAddress(
"localhost",
cluster.getNameNodePort());
DFSClient client = new DFSClient(addr, conf);
NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
// register a datanode
DatanodeID dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
"fake-datanode-id", DN_XFER_PORT, DN_INFO_PORT, DN_INFO_SECURE_PORT,
DN_IPC_PORT);
long nnCTime = cluster.getNamesystem().getFSImage().getStorage()
.getCTime();
StorageInfo mockStorageInfo = mock(StorageInfo.class);
doReturn(nnCTime).when(mockStorageInfo).getCTime();
doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockStorageInfo)
.getLayoutVersion();
DatanodeRegistration dnReg = new DatanodeRegistration(dnId,
mockStorageInfo, null, VersionInfo.getVersion());
rpcServer.registerDatanode(dnReg);
DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
assertEquals("Expected a registered datanode", 1, report.length);
// register the same datanode again with a different storage ID
dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
"changed-fake-datanode-id", DN_XFER_PORT, DN_INFO_PORT,
DN_INFO_SECURE_PORT, DN_IPC_PORT);
dnReg = new DatanodeRegistration(dnId,
mockStorageInfo, null, VersionInfo.getVersion());
rpcServer.registerDatanode(dnReg);
report = client.datanodeReport(DatanodeReportType.ALL);
assertEquals("Datanode with changed storage ID not recognized",
1, report.length);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testRegistrationWithDifferentSoftwareVersions() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "3.0.0");
conf.set(DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY, "3.0.0");
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0)
.build();
NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
StorageInfo mockStorageInfo = mock(StorageInfo.class);
doReturn(nnCTime).when(mockStorageInfo).getCTime();
DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
doReturn("127.0.0.1").when(mockDnReg).getIpAddr();
doReturn(123).when(mockDnReg).getXferPort();
doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
// Should succeed when software versions are the same.
doReturn("3.0.0").when(mockDnReg).getSoftwareVersion();
rpcServer.registerDatanode(mockDnReg);
// Should succeed when software version of DN is above minimum required by NN.
doReturn("4.0.0").when(mockDnReg).getSoftwareVersion();
rpcServer.registerDatanode(mockDnReg);
// Should fail when software version of DN is below minimum required by NN.
doReturn("2.0.0").when(mockDnReg).getSoftwareVersion();
try {
rpcServer.registerDatanode(mockDnReg);
fail("Should not have been able to register DN with too-low version.");
} catch (IncorrectVersionException ive) {
GenericTestUtils.assertExceptionContains(
"The reported DataNode version is too low", ive);
LOG.info("Got expected exception", ive);
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testRegistrationWithDifferentSoftwareVersionsDuringUpgrade()
throws Exception {
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "1.0.0");
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0)
.build();
NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
StorageInfo mockStorageInfo = mock(StorageInfo.class);
doReturn(nnCTime).when(mockStorageInfo).getCTime();
DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
// Should succeed when software versions are the same and CTimes are the
// same.
doReturn(VersionInfo.getVersion()).when(mockDnReg).getSoftwareVersion();
doReturn("127.0.0.1").when(mockDnReg).getIpAddr();
doReturn(123).when(mockDnReg).getXferPort();
rpcServer.registerDatanode(mockDnReg);
// Should succeed when software versions are the same and CTimes are
// different.
doReturn(nnCTime + 1).when(mockStorageInfo).getCTime();
rpcServer.registerDatanode(mockDnReg);
// Should fail when software version of DN is different from NN and CTimes
// are different.
doReturn(VersionInfo.getVersion() + ".1").when(mockDnReg).getSoftwareVersion();
try {
rpcServer.registerDatanode(mockDnReg);
fail("Should not have been able to register DN with different software" +
" versions and CTimes");
} catch (IncorrectVersionException ive) {
GenericTestUtils.assertExceptionContains(
"does not match CTime of NN", ive);
LOG.info("Got expected exception", ive);
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 11,806 | 36.842949 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.DirectoryFileFilter;
import org.apache.commons.io.filefilter.PrefixFileFilter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.log4j.Level;
import org.junit.Test;
import org.slf4j.Logger;
/**
* A JUnit test for corrupted file handling.
*/
public class TestFileCorruption {
{
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
}
static Logger LOG = NameNode.stateChangeLog;
/** check if DFS can handle corrupted blocks properly */
@Test
public void testFileCorruption() throws Exception {
MiniDFSCluster cluster = null;
DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFileCorruption").
setNumFiles(20).build();
try {
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = cluster.getFileSystem();
util.createFiles(fs, "/srcdat");
// Now deliberately remove the blocks
File storageDir = cluster.getInstanceStorageDir(2, 0);
String bpid = cluster.getNamesystem().getBlockPoolId();
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
assertTrue("data directory does not exist", data_dir.exists());
Collection<File> blocks = FileUtils.listFiles(data_dir,
new PrefixFileFilter(Block.BLOCK_FILE_PREFIX),
DirectoryFileFilter.DIRECTORY);
assertTrue("Blocks do not exist in data-dir", blocks.size() > 0);
for (File block : blocks) {
System.out.println("Deliberately removing file " + block.getName());
assertTrue("Cannot remove file.", block.delete());
}
assertTrue("Corrupted replicas not handled properly.",
util.checkFiles(fs, "/srcdat"));
util.cleanup(fs, "/srcdat");
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
/** check if local FS can handle corrupted blocks properly */
@Test
public void testLocalFileCorruption() throws Exception {
Configuration conf = new HdfsConfiguration();
Path file = new Path(PathUtils.getTestDirName(getClass()), "corruptFile");
FileSystem fs = FileSystem.getLocal(conf);
DataOutputStream dos = fs.create(file);
dos.writeBytes("original bytes");
dos.close();
// Now deliberately corrupt the file
dos = new DataOutputStream(new FileOutputStream(file.toString()));
dos.writeBytes("corruption");
dos.close();
// Now attempt to read the file
DataInputStream dis = fs.open(file, 512);
try {
System.out.println("A ChecksumException is expected to be logged.");
dis.readByte();
} catch (ChecksumException ignore) {
//expect this exception but let any NPE get thrown
}
fs.delete(file, true);
}
/** Test the case that a replica is reported corrupt while it is not
   * in blocksMap. Make sure that an ArrayIndexOutOfBoundsException is not
   * thrown. See HADOOP-4351.
*/
@Test
public void testArrayOutOfBoundsException() throws Exception {
MiniDFSCluster cluster = null;
try {
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
final Path FILE_PATH = new Path("/tmp.txt");
final long FILE_LEN = 1L;
DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short)2, 1L);
// get the block
final String bpid = cluster.getNamesystem().getBlockPoolId();
File storageDir = cluster.getInstanceStorageDir(0, 0);
File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
assertTrue("Data directory does not exist", dataDir.exists());
ExtendedBlock blk = getBlock(bpid, dataDir);
if (blk == null) {
storageDir = cluster.getInstanceStorageDir(0, 1);
dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
blk = getBlock(bpid, dataDir);
}
assertFalse("Data directory does not contain any blocks or there was an "
+ "IO error", blk==null);
// start a third datanode
cluster.startDataNodes(conf, 1, true, null, null);
ArrayList<DataNode> datanodes = cluster.getDataNodes();
assertEquals(datanodes.size(), 3);
DataNode dataNode = datanodes.get(2);
// report corrupted block by the third datanode
DatanodeRegistration dnR =
DataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
FSNamesystem ns = cluster.getNamesystem();
ns.writeLock();
try {
cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(
blk, new DatanodeInfo(dnR), "TEST", "STORAGE_ID");
} finally {
ns.writeUnlock();
}
// open the file
fs.open(FILE_PATH);
//clean up
fs.delete(FILE_PATH, false);
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
public static ExtendedBlock getBlock(String bpid, File dataDir) {
List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
if (metadataFiles == null || metadataFiles.isEmpty()) {
return null;
}
File metadataFile = metadataFiles.get(0);
File blockFile = Block.metaToBlockFile(metadataFile);
return new ExtendedBlock(bpid, Block.getBlockId(blockFile.getName()),
blockFile.length(), Block.getGenerationStamp(metadataFile.getName()));
}
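  /**
   * Illustrative sketch only: locating a block on disk for a given block pool
   * with the helper above. The choice of datanode 0 and its first storage
   * directory is an assumption of the example; the cluster is assumed to be
   * running.
   */
  private static ExtendedBlock findAnyBlockSketch(MiniDFSCluster cluster,
      String bpid) {
    // Look in the first storage directory of datanode 0; getBlock returns null
    // if no finalized block metadata is found there.
    File storageDir = cluster.getInstanceStorageDir(0, 0);
    File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
    return getBlock(bpid, dataDir);
  }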
}
| 7,468 | 37.699482 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
/**
 * This class tests the cases of concurrent reads/writes to a file;
 * i.e., one writer and one or more readers can see unfinished blocks.
*/
public class TestFileConcurrentReader {
private enum SyncType {
SYNC,
APPEND,
}
private static final Logger LOG =
Logger.getLogger(TestFileConcurrentReader.class);
{
((Log4JLogger) LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
}
static final long seed = 0xDEADBEEFL;
static final int blockSize = 8192;
private static final int DEFAULT_WRITE_SIZE = 1024 + 1;
private static final int SMALL_WRITE_SIZE = 61;
private Configuration conf;
private MiniDFSCluster cluster;
private FileSystem fileSystem;
@Before
public void setUp() throws IOException {
conf = new Configuration();
init(conf);
}
@After
public void tearDown() throws Exception {
cluster.shutdown();
cluster = null;
}
private void init(Configuration conf) throws IOException {
if (cluster != null) {
cluster.shutdown();
}
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitClusterUp();
fileSystem = cluster.getFileSystem();
}
private void writeFileAndSync(FSDataOutputStream stm, int size)
throws IOException {
byte[] buffer = DFSTestUtil.generateSequentialBytes(0, size);
stm.write(buffer, 0, size);
stm.hflush();
}
private void checkCanRead(FileSystem fileSys, Path path, int numBytes)
throws IOException {
waitForBlocks(fileSys, path);
assertBytesAvailable(fileSys, path, numBytes);
}
// make sure bytes are available and match expected
private void assertBytesAvailable(
FileSystem fileSystem,
Path path,
int numBytes
) throws IOException {
byte[] buffer = new byte[numBytes];
FSDataInputStream inputStream = fileSystem.open(path);
IOUtils.readFully(inputStream, buffer, 0, numBytes);
inputStream.close();
assertTrue(
"unable to validate bytes",
validateSequentialBytes(buffer, 0, numBytes)
);
}
private void waitForBlocks(FileSystem fileSys, Path name)
throws IOException {
// wait until we have at least one block in the file to read.
boolean done = false;
while (!done) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
}
done = true;
BlockLocation[] locations = fileSys.getFileBlockLocations(
fileSys.getFileStatus(name), 0, blockSize);
if (locations.length < 1) {
done = false;
continue;
}
}
}
/**
   * Test that writes to an incomplete block are available to a reader.
*/
@Test (timeout = 30000)
public void testUnfinishedBlockRead()
throws IOException {
    // create a new file in the root, write data, do not close
Path file1 = new Path("/unfinished-block");
FSDataOutputStream stm = TestFileCreation.createFile(fileSystem, file1, 1);
// write partial block and sync
int partialBlockSize = blockSize / 2;
writeFileAndSync(stm, partialBlockSize);
// Make sure a client can read it before it is closed
checkCanRead(fileSystem, file1, partialBlockSize);
stm.close();
}
/**
* test case: if the BlockSender decides there is only one packet to send,
* the previous computation of the pktSize based on transferToAllowed
* would result in too small a buffer to do the buffer-copy needed
* for partial chunks.
*/
@Test (timeout = 30000)
public void testUnfinishedBlockPacketBufferOverrun() throws IOException {
// check that / exists
Path path = new Path("/");
System.out.println("Path : \"" + path.toString() + "\"");
    // create a new file in the root, write data, do not close
Path file1 = new Path("/unfinished-block");
final FSDataOutputStream stm =
TestFileCreation.createFile(fileSystem, file1, 1);
// write partial block and sync
final int bytesPerChecksum = conf.getInt("io.bytes.per.checksum", 512);
final int partialBlockSize = bytesPerChecksum - 1;
writeFileAndSync(stm, partialBlockSize);
// Make sure a client can read it before it is closed
checkCanRead(fileSystem, file1, partialBlockSize);
stm.close();
}
// use a small block size and a large write so that DN is busy creating
  // new blocks. This makes it almost 100% sure we can reproduce the
  // case of a client getting a DN that hasn't yet created the blocks.
@Test (timeout = 30000)
public void testImmediateReadOfNewFile()
throws IOException {
final int blockSize = 64 * 1024;
final int writeSize = 10 * blockSize;
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
init(conf);
final int requiredSuccessfulOpens = 100;
final Path file = new Path("/file1");
final AtomicBoolean openerDone = new AtomicBoolean(false);
final AtomicReference<String> errorMessage = new AtomicReference<String>();
final FSDataOutputStream out = fileSystem.create(file);
final Thread writer = new Thread(new Runnable() {
@Override
public void run() {
try {
while (!openerDone.get()) {
out.write(DFSTestUtil.generateSequentialBytes(0, writeSize));
out.hflush();
}
} catch (IOException e) {
LOG.warn("error in writer", e);
} finally {
try {
out.close();
} catch (IOException e) {
LOG.error("unable to close file");
}
}
}
});
Thread opener = new Thread(new Runnable() {
@Override
public void run() {
try {
for (int i = 0; i < requiredSuccessfulOpens; i++) {
fileSystem.open(file).close();
}
openerDone.set(true);
} catch (IOException e) {
openerDone.set(true);
errorMessage.set(String.format(
"got exception : %s",
StringUtils.stringifyException(e)
));
} catch (Exception e) {
openerDone.set(true);
errorMessage.set(String.format(
"got exception : %s",
StringUtils.stringifyException(e)
));
writer.interrupt();
fail("here");
}
}
});
writer.start();
opener.start();
try {
writer.join();
opener.join();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
assertNull(errorMessage.get(), errorMessage.get());
}
  // for some reason, using transferTo evokes the race condition more often
// so test separately
@Test (timeout = 30000)
public void testUnfinishedBlockCRCErrorTransferTo() throws IOException {
runTestUnfinishedBlockCRCError(true, SyncType.SYNC, DEFAULT_WRITE_SIZE);
}
@Test (timeout = 30000)
public void testUnfinishedBlockCRCErrorTransferToVerySmallWrite()
throws IOException {
runTestUnfinishedBlockCRCError(true, SyncType.SYNC, SMALL_WRITE_SIZE);
}
// fails due to issue w/append, disable
@Ignore
public void _testUnfinishedBlockCRCErrorTransferToAppend()
throws IOException {
runTestUnfinishedBlockCRCError(true, SyncType.APPEND, DEFAULT_WRITE_SIZE);
}
@Test (timeout = 30000)
public void testUnfinishedBlockCRCErrorNormalTransfer() throws IOException {
runTestUnfinishedBlockCRCError(false, SyncType.SYNC, DEFAULT_WRITE_SIZE);
}
@Test (timeout = 30000)
public void testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite()
throws IOException {
runTestUnfinishedBlockCRCError(false, SyncType.SYNC, SMALL_WRITE_SIZE);
}
// fails due to issue w/append, disable
@Ignore
public void _testUnfinishedBlockCRCErrorNormalTransferAppend()
throws IOException {
runTestUnfinishedBlockCRCError(false, SyncType.APPEND, DEFAULT_WRITE_SIZE);
}
private void runTestUnfinishedBlockCRCError(
final boolean transferToAllowed, SyncType syncType, int writeSize
) throws IOException {
runTestUnfinishedBlockCRCError(
transferToAllowed, syncType, writeSize, new Configuration()
);
}
private void runTestUnfinishedBlockCRCError(
final boolean transferToAllowed,
final SyncType syncType,
final int writeSize,
Configuration conf
) throws IOException {
conf.setBoolean(DFSConfigKeys.DFS_DATANODE_TRANSFERTO_ALLOWED_KEY,
transferToAllowed);
init(conf);
final Path file = new Path("/block-being-written-to");
final int numWrites = 2000;
final AtomicBoolean writerDone = new AtomicBoolean(false);
final AtomicBoolean writerStarted = new AtomicBoolean(false);
final AtomicBoolean error = new AtomicBoolean(false);
final Thread writer = new Thread(new Runnable() {
@Override
public void run() {
try {
FSDataOutputStream outputStream = fileSystem.create(file);
if (syncType == SyncType.APPEND) {
outputStream.close();
outputStream = fileSystem.append(file);
}
try {
for (int i = 0; !error.get() && i < numWrites; i++) {
final byte[] writeBuf =
DFSTestUtil.generateSequentialBytes(i * writeSize, writeSize);
outputStream.write(writeBuf);
if (syncType == SyncType.SYNC) {
outputStream.hflush();
}
writerStarted.set(true);
}
} catch (IOException e) {
error.set(true);
LOG.error("error writing to file", e);
} finally {
outputStream.close();
}
writerDone.set(true);
} catch (Exception e) {
LOG.error("error in writer", e);
throw new RuntimeException(e);
}
}
});
Thread tailer = new Thread(new Runnable() {
@Override
public void run() {
try {
long startPos = 0;
while (!writerDone.get() && !error.get()) {
if (writerStarted.get()) {
try {
startPos = tailFile(file, startPos);
} catch (IOException e) {
LOG.error(String.format("error tailing file %s", file), e);
throw new RuntimeException(e);
}
}
}
} catch (RuntimeException e) {
if (e.getCause() instanceof ChecksumException) {
error.set(true);
}
writer.interrupt();
LOG.error("error in tailer", e);
throw e;
}
}
});
writer.start();
tailer.start();
try {
writer.join();
tailer.join();
assertFalse(
"error occurred, see log above", error.get()
);
} catch (InterruptedException e) {
LOG.info("interrupted waiting for writer or tailer to complete");
Thread.currentThread().interrupt();
}
}
private boolean validateSequentialBytes(byte[] buf, int startPos, int len) {
for (int i = 0; i < len; i++) {
int expected = (i + startPos) % 127;
if (buf[i] % 127 != expected) {
LOG.error(String.format("at position [%d], got [%d] and expected [%d]",
startPos, buf[i], expected));
return false;
}
}
return true;
}
private long tailFile(Path file, long startPos) throws IOException {
long numRead = 0;
FSDataInputStream inputStream = fileSystem.open(file);
inputStream.seek(startPos);
int len = 4 * 1024;
byte[] buf = new byte[len];
int read;
while ((read = inputStream.read(buf)) > -1) {
LOG.info(String.format("read %d bytes", read));
if (!validateSequentialBytes(buf, (int) (startPos + numRead), read)) {
LOG.error(String.format("invalid bytes: [%s]\n", Arrays.toString(buf)));
throw new ChecksumException(
String.format("unable to validate bytes"),
startPos
);
}
numRead += read;
}
inputStream.close();
return numRead + startPos - 1;
}
}
| 14,069 | 29.653595 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
/** Utilities for append-related tests */
public class AppendTestUtil {
/** For specifying the random number generator seed,
* change the following value:
*/
static final Long RANDOM_NUMBER_GENERATOR_SEED = null;
static final Log LOG = LogFactory.getLog(AppendTestUtil.class);
private static final Random SEED = new Random();
static {
final long seed = RANDOM_NUMBER_GENERATOR_SEED == null?
SEED.nextLong(): RANDOM_NUMBER_GENERATOR_SEED;
LOG.info("seed=" + seed);
SEED.setSeed(seed);
}
private static final ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
@Override
protected Random initialValue() {
final Random r = new Random();
synchronized(SEED) {
final long seed = SEED.nextLong();
r.setSeed(seed);
LOG.info(Thread.currentThread().getName() + ": seed=" + seed);
}
return r;
}
};
static final int BLOCK_SIZE = 1024;
static final int NUM_BLOCKS = 10;
static final int FILE_SIZE = NUM_BLOCKS * BLOCK_SIZE + 1;
static long seed = -1;
static int nextInt() {return RANDOM.get().nextInt();}
static int nextInt(int n) {return RANDOM.get().nextInt(n);}
  static long nextLong() {return RANDOM.get().nextLong();}
public static byte[] randomBytes(long seed, int size) {
LOG.info("seed=" + seed + ", size=" + size);
final byte[] b = new byte[size];
final Random rand = new Random(seed);
rand.nextBytes(b);
return b;
}
/** @return a random file partition of length n. */
public static int[] randomFilePartition(int n, int parts) {
int[] p = new int[parts];
for(int i = 0; i < p.length; i++) {
p[i] = nextInt(n - i - 1) + 1;
}
Arrays.sort(p);
for(int i = 1; i < p.length; i++) {
if (p[i] <= p[i - 1]) {
p[i] = p[i - 1] + 1;
}
}
LOG.info("partition=" + Arrays.toString(p));
assertTrue("i=0", p[0] > 0 && p[0] < n);
for(int i = 1; i < p.length; i++) {
assertTrue("i=" + i, p[i] > p[i - 1] && p[i] < n);
}
return p;
}
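  /**
   * Sketch only, not called by any test: how randomFilePartition might be used
   * to pick hflush points while writing a file. The file length of 4096 bytes
   * and the 4 parts are assumptions of the example.
   */
  private static void partitionUsageSketch() {
    // Split a 4096-byte file into 4 strictly increasing cut points; a writer
    // could hflush() at each of these offsets and verify readers see the data.
    int[] cuts = randomFilePartition(4096, 4);
    LOG.info("sketch cut points=" + Arrays.toString(cuts));
  }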
static void sleep(long ms) {
try {
Thread.sleep(ms);
} catch (InterruptedException e) {
LOG.info("ms=" + ms, e);
}
}
/**
   * Returns a new FileSystem instance created with a different user name.
* @param conf current Configuration
* @return FileSystem instance
* @throws IOException
* @throws InterruptedException
*/
public static FileSystem createHdfsWithDifferentUsername(final Configuration conf
) throws IOException, InterruptedException {
String username = UserGroupInformation.getCurrentUser().getShortUserName()+"_XXX";
UserGroupInformation ugi =
UserGroupInformation.createUserForTesting(username, new String[]{"supergroup"});
return DFSTestUtil.getFileSystemAs(ugi, conf);
}
public static void write(OutputStream out, int offset, int length) throws IOException {
final byte[] bytes = new byte[length];
for(int i = 0; i < length; i++) {
bytes[i] = (byte)(offset + i);
}
out.write(bytes);
}
public static void check(FileSystem fs, Path p, long length) throws IOException {
int i = -1;
try {
final FileStatus status = fs.getFileStatus(p);
FSDataInputStream in = fs.open(p);
if (in.getWrappedStream() instanceof DFSInputStream) {
long len = ((DFSInputStream)in.getWrappedStream()).getFileLength();
assertEquals(length, len);
} else {
assertEquals(length, status.getLen());
}
for(i++; i < length; i++) {
assertEquals((byte)i, (byte)in.read());
}
i = -(int)length;
assertEquals(-1, in.read()); //EOF
in.close();
} catch(IOException ioe) {
throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
}
}
public static void check(DistributedFileSystem fs, Path p, int position,
int length) throws IOException {
byte[] buf = new byte[length];
int i = 0;
try {
FSDataInputStream in = fs.open(p);
in.read(position, buf, 0, buf.length);
for(i = position; i < length + position; i++) {
assertEquals((byte) i, buf[i - position]);
}
in.close();
} catch(IOException ioe) {
throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
}
}
/**
* create a buffer that contains the entire test file data.
*/
public static byte[] initBuffer(int size) {
if (seed == -1)
seed = nextLong();
return randomBytes(seed, size);
}
/**
   * Creates a file but does not close it.
   * Make sure to call close() on the returned stream.
* @throws IOException an exception might be thrown
*/
public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
throws IOException {
return fileSys.create(name, true,
fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) repl, BLOCK_SIZE);
}
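  /**
   * Minimal create/write/verify sketch built from the helpers in this class;
   * illustrative only and not exercised here. The path is supplied by the
   * caller, the 1024-byte write and replication factor 1 are assumptions of
   * the example, and fs is assumed to be a DFS instance from the calling test.
   */
  private static void createWriteCheckSketch(FileSystem fs, Path p)
      throws IOException {
    // Create a stream with replication 1, write 1024 sequential bytes starting
    // at offset 0, then close and verify the visible length and contents.
    FSDataOutputStream out = createFile(fs, p, 1);
    write(out, 0, 1024);
    out.close();
    check(fs, p, 1024);
  }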
public static void checkFullFile(FileSystem fs, Path file, int len,
final byte[] compareContent) throws IOException {
checkFullFile(fs, file, len, compareContent, file.toString());
}
/**
* Compare the content of a file created from FileSystem and Path with
* the specified byte[] buffer's content
* @throws IOException an exception might be thrown
*/
public static void checkFullFile(FileSystem fs, Path name, int len,
final byte[] compareContent, String message) throws IOException {
checkFullFile(fs, name, len, compareContent, message, true);
}
public static void checkFullFile(FileSystem fs, Path name, int len,
final byte[] compareContent, String message,
boolean checkFileStatus) throws IOException {
if (checkFileStatus) {
final FileStatus status = fs.getFileStatus(name);
assertEquals("len=" + len + " but status.getLen()=" + status.getLen(),
len, status.getLen());
}
FSDataInputStream stm = fs.open(name);
byte[] actual = new byte[len];
stm.readFully(0, actual);
checkData(actual, 0, compareContent, message);
stm.close();
}
private static void checkData(final byte[] actual, int from,
final byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
assertEquals(message+" byte "+(from+idx)+" differs. expected "+
expected[from+idx]+" actual "+actual[idx],
expected[from+idx], actual[idx]);
actual[idx] = 0;
}
}
public static void testAppend(FileSystem fs, Path p) throws IOException {
final byte[] bytes = new byte[1000];
{ //create file
final FSDataOutputStream out = fs.create(p, (short)1);
out.write(bytes);
out.close();
assertEquals(bytes.length, fs.getFileStatus(p).getLen());
}
for(int i = 2; i < 500; i++) {
//append
final FSDataOutputStream out = fs.append(p);
out.write(bytes);
out.close();
assertEquals(i*bytes.length, fs.getFileStatus(p).getLen());
}
}
}
| 8,599 | 32.333333 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.TestTrash;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Test trash using HDFS
*/
public class TestHDFSTrash {
private static MiniDFSCluster cluster = null;
@BeforeClass
public static void setUp() throws Exception {
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
}
@AfterClass
public static void tearDown() {
if (cluster != null) { cluster.shutdown(); }
}
@Test
public void testTrash() throws IOException {
TestTrash.trashShell(cluster.getFileSystem(), new Path("/"));
}
@Test
public void testNonDefaultFS() throws IOException {
FileSystem fs = cluster.getFileSystem();
Configuration conf = fs.getConf();
conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
TestTrash.trashNonDefaultFS(conf);
}
}
| 1,904 | 30.229508 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
/**
* A JUnit test for checking if restarting DFS preserves integrity.
*/
public class TestRestartDFS {
public void runTests(Configuration conf, boolean serviceTest) throws Exception {
MiniDFSCluster cluster = null;
DFSTestUtil files = new DFSTestUtil.Builder().setName("TestRestartDFS").
setNumFiles(20).build();
final String dir = "/srcdat";
final Path rootpath = new Path("/");
final Path dirpath = new Path(dir);
long rootmtime;
FileStatus rootstatus;
FileStatus dirstatus;
try {
if (serviceTest) {
conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
"localhost:0");
}
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
FileSystem fs = cluster.getFileSystem();
files.createFiles(fs, dir);
rootmtime = fs.getFileStatus(rootpath).getModificationTime();
rootstatus = fs.getFileStatus(dirpath);
dirstatus = fs.getFileStatus(dirpath);
fs.setOwner(rootpath, rootstatus.getOwner() + "_XXX", null);
fs.setOwner(dirpath, null, dirstatus.getGroup() + "_XXX");
} finally {
if (cluster != null) { cluster.shutdown(); }
}
try {
if (serviceTest) {
conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
"localhost:0");
}
      // Here we restart the MiniDFSCluster without formatting the namenode
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(false).build();
FileSystem fs = cluster.getFileSystem();
assertTrue("Filesystem corrupted after restart.",
files.checkFiles(fs, dir));
final FileStatus newrootstatus = fs.getFileStatus(rootpath);
assertEquals(rootmtime, newrootstatus.getModificationTime());
assertEquals(rootstatus.getOwner() + "_XXX", newrootstatus.getOwner());
assertEquals(rootstatus.getGroup(), newrootstatus.getGroup());
final FileStatus newdirstatus = fs.getFileStatus(dirpath);
assertEquals(dirstatus.getOwner(), newdirstatus.getOwner());
assertEquals(dirstatus.getGroup() + "_XXX", newdirstatus.getGroup());
rootmtime = fs.getFileStatus(rootpath).getModificationTime();
} finally {
if (cluster != null) { cluster.shutdown(); }
}
try {
if (serviceTest) {
conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
"localhost:0");
}
// This is a second restart to check that after the first restart
// the image written in parallel to both places did not get corrupted
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(false).build();
FileSystem fs = cluster.getFileSystem();
assertTrue("Filesystem corrupted after restart.",
files.checkFiles(fs, dir));
final FileStatus newrootstatus = fs.getFileStatus(rootpath);
assertEquals(rootmtime, newrootstatus.getModificationTime());
assertEquals(rootstatus.getOwner() + "_XXX", newrootstatus.getOwner());
assertEquals(rootstatus.getGroup(), newrootstatus.getGroup());
final FileStatus newdirstatus = fs.getFileStatus(dirpath);
assertEquals(dirstatus.getOwner(), newdirstatus.getOwner());
assertEquals(dirstatus.getGroup() + "_XXX", newdirstatus.getGroup());
files.cleanup(fs, dir);
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
/** check if DFS remains in proper condition after a restart */
@Test
public void testRestartDFS() throws Exception {
final Configuration conf = new HdfsConfiguration();
runTests(conf, false);
}
/** check if DFS remains in proper condition after a restart
* this rerun is with 2 ports enabled for RPC in the namenode
*/
@Test
public void testRestartDualPortDFS() throws Exception {
final Configuration conf = new HdfsConfiguration();
runTests(conf, true);
}
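  // Illustrative sketch (not part of the original test): the minimal
  // restart-without-format pattern exercised above, shown in isolation. The
  // path "/example" and the single-datanode sizing are arbitrary choices.
  static void restartWithoutFormatExample(Configuration conf) throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.getFileSystem().mkdirs(new Path("/example"));
    } finally {
      cluster.shutdown();
    }
    // format(false) reuses the existing name/data directories, so the
    // namespace created above survives the restart.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(false).build();
    try {
      assertTrue(cluster.getFileSystem().exists(new Path("/example")));
    } finally {
      cluster.shutdown();
    }
  }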
}
| 5,026 | 37.669231 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import java.io.File;
import java.util.Collections;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.junit.After;
import org.junit.Test;
import com.google.common.collect.Lists;
/**
 * This test ensures that the system responds appropriately when it is
 * finalized.
*/
public class TestDFSFinalize {
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestDFSFinalize");
private Configuration conf;
private int testCounter = 0;
private MiniDFSCluster cluster = null;
/**
* Writes an INFO log message containing the parameters.
*/
void log(String label, int numDirs) {
LOG.info("============================================================");
LOG.info("***TEST " + (testCounter++) + "*** "
+ label + ":"
+ " numDirs="+numDirs);
}
/**
* Verify that the current directory exists and that the previous directory
   * does not exist. Verify that current has not been modified by comparing
   * the checksums of all the files it contains with their original checksums.
*/
static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs,
String bpid) throws Exception {
List<File> dirs = Lists.newArrayList();
for (int i = 0; i < nameNodeDirs.length; i++) {
File curDir = new File(nameNodeDirs[i], "current");
dirs.add(curDir);
FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
}
FSImageTestUtil.assertParallelFilesAreIdentical(
dirs, Collections.<String>emptySet());
File dnCurDirs[] = new File[dataNodeDirs.length];
for (int i = 0; i < dataNodeDirs.length; i++) {
dnCurDirs[i] = new File(dataNodeDirs[i],"current");
assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i],
false), UpgradeUtilities.checksumMasterDataNodeContents());
}
for (int i = 0; i < nameNodeDirs.length; i++) {
assertFalse(new File(nameNodeDirs[i],"previous").isDirectory());
}
if (bpid == null) {
for (int i = 0; i < dataNodeDirs.length; i++) {
assertFalse(new File(dataNodeDirs[i],"previous").isDirectory());
}
} else {
for (int i = 0; i < dataNodeDirs.length; i++) {
File bpRoot = BlockPoolSliceStorage.getBpRoot(bpid, dnCurDirs[i]);
assertFalse(new File(bpRoot,"previous").isDirectory());
File bpCurFinalizeDir = new File(bpRoot,"current/"+DataStorage.STORAGE_DIR_FINALIZED);
assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
bpCurFinalizeDir, true),
UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
}
}
}
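  // Illustrative sketch (not part of the original test): finalizing removes
  // the "previous" checkpoint directory so a rollback is no longer possible;
  // this helper only restates the per-directory expectation that
  // checkResult() verifies above.
  static void assertFinalized(String storageDir) {
    assertFalse(new File(storageDir, "previous").isDirectory());
  }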
/**
* This test attempts to finalize the NameNode and DataNode.
*/
@Test
public void testFinalize() throws Exception {
UpgradeUtilities.initialize();
for (int numDirs = 1; numDirs <= 2; numDirs++) {
      /* This test requires that the "current" directory not change after
       * the upgrade. Strictly speaking it would be OK for those contents to
       * change, but block verification is disabled for now so that the
       * contents are not changed.
       * Duplicate replica deletion is also disabled because the test
       * intentionally mirrors the contents of the storage directories.
       */
conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
log("Finalize NN & DN with existing previous dir", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
cluster = new MiniDFSCluster.Builder(conf)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.startupOption(StartupOption.REGULAR)
.build();
cluster.finalizeCluster(conf);
cluster.triggerBlockReports();
// 1 second should be enough for asynchronous DN finalize
Thread.sleep(1000);
checkResult(nameNodeDirs, dataNodeDirs, null);
log("Finalize NN & DN without existing previous dir", numDirs);
cluster.finalizeCluster(conf);
cluster.triggerBlockReports();
// 1 second should be enough for asynchronous DN finalize
Thread.sleep(1000);
checkResult(nameNodeDirs, dataNodeDirs, null);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("Finalize NN & BP with existing previous dir", numDirs);
String bpid = UpgradeUtilities.getCurrentBlockPoolID(cluster);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "current", bpid);
UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "previous", bpid);
cluster = new MiniDFSCluster.Builder(conf)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.startupOption(StartupOption.REGULAR)
.build();
cluster.finalizeCluster(conf);
cluster.triggerBlockReports();
// 1 second should be enough for asynchronous BP finalize
Thread.sleep(1000);
checkResult(nameNodeDirs, dataNodeDirs, bpid);
log("Finalize NN & BP without existing previous dir", numDirs);
cluster.finalizeCluster(conf);
cluster.triggerBlockReports();
// 1 second should be enough for asynchronous BP finalize
Thread.sleep(1000);
checkResult(nameNodeDirs, dataNodeDirs, bpid);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
} // end numDir loop
}
@After
public void tearDown() throws Exception {
LOG.info("Shutting down MiniDFSCluster");
if (cluster != null) cluster.shutdown();
}
public static void main(String[] args) throws Exception {
new TestDFSFinalize().testFinalize();
}
}
| 8,289 | 40.243781 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.channels.ReadableByteChannel;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.net.unix.DomainSocket;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.common.collect.HashMultiset;
public class TestPeerCache {
static final Log LOG = LogFactory.getLog(TestPeerCache.class);
private static class FakePeer implements Peer {
private boolean closed = false;
private final boolean hasDomain;
private final DatanodeID dnId;
public FakePeer(DatanodeID dnId, boolean hasDomain) {
this.dnId = dnId;
this.hasDomain = hasDomain;
}
@Override
public ReadableByteChannel getInputStreamChannel() {
throw new UnsupportedOperationException();
}
@Override
public void setReadTimeout(int timeoutMs) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public int getReceiveBufferSize() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public boolean getTcpNoDelay() throws IOException {
return false;
}
@Override
public void setWriteTimeout(int timeoutMs) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public boolean isClosed() {
return closed;
}
@Override
public void close() throws IOException {
closed = true;
}
@Override
public String getRemoteAddressString() {
return dnId.getInfoAddr();
}
@Override
public String getLocalAddressString() {
return "127.0.0.1:123";
}
@Override
public InputStream getInputStream() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public OutputStream getOutputStream() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public boolean isLocal() {
return true;
}
@Override
public String toString() {
return "FakePeer(dnId=" + dnId + ")";
}
@Override
public DomainSocket getDomainSocket() {
if (!hasDomain) return null;
// Return a mock which throws an exception whenever any function is
// called.
return Mockito.mock(DomainSocket.class,
new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation)
throws Throwable {
throw new RuntimeException("injected fault.");
} });
}
@Override
public boolean equals(Object o) {
if (!(o instanceof FakePeer)) return false;
FakePeer other = (FakePeer)o;
return hasDomain == other.hasDomain &&
dnId.equals(other.dnId);
}
@Override
public int hashCode() {
return dnId.hashCode() ^ (hasDomain ? 1 : 0);
}
@Override
public boolean hasSecureChannel() {
return false;
}
}
@Test
public void testAddAndRetrieve() throws Exception {
PeerCache cache = new PeerCache(3, 100000);
DatanodeID dnId = new DatanodeID("192.168.0.1",
"fakehostname", "fake_datanode_id",
100, 101, 102, 103);
FakePeer peer = new FakePeer(dnId, false);
cache.put(dnId, peer);
assertTrue(!peer.isClosed());
assertEquals(1, cache.size());
assertEquals(peer, cache.get(dnId, false));
assertEquals(0, cache.size());
cache.close();
}
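  // Illustrative sketch (not part of the original test): get() removes a peer
  // from the cache, so a caller that wants the connection to be reused must
  // put() it back when finished. The DatanodeID values are arbitrary.
  @Test
  public void testReuseRequiresPut() throws Exception {
    PeerCache cache = new PeerCache(3, 100000);
    DatanodeID dnId = new DatanodeID("192.168.0.2",
        "fakehostname", "fake_datanode_id",
        100, 101, 102, 103);
    FakePeer peer = new FakePeer(dnId, false);
    cache.put(dnId, peer);
    Peer fetched = cache.get(dnId, false);
    assertSame(peer, fetched);
    assertEquals(0, cache.size());   // get() removed the entry
    cache.put(dnId, fetched);        // return it so it can be reused later
    assertEquals(1, cache.size());
    cache.close();
  }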
@Test
public void testExpiry() throws Exception {
final int CAPACITY = 3;
final int EXPIRY_PERIOD = 10;
PeerCache cache = new PeerCache(CAPACITY, EXPIRY_PERIOD);
DatanodeID dnIds[] = new DatanodeID[CAPACITY];
FakePeer peers[] = new FakePeer[CAPACITY];
for (int i = 0; i < CAPACITY; ++i) {
dnIds[i] = new DatanodeID("192.168.0.1",
"fakehostname_" + i, "fake_datanode_id",
100, 101, 102, 103);
peers[i] = new FakePeer(dnIds[i], false);
}
for (int i = 0; i < CAPACITY; ++i) {
cache.put(dnIds[i], peers[i]);
}
// Wait for the peers to expire
Thread.sleep(EXPIRY_PERIOD * 50);
assertEquals(0, cache.size());
// make sure that the peers were closed when they were expired
for (int i = 0; i < CAPACITY; ++i) {
assertTrue(peers[i].isClosed());
}
// sleep for another second and see if
// the daemon thread runs fine on empty cache
Thread.sleep(EXPIRY_PERIOD * 50);
cache.close();
}
@Test
public void testEviction() throws Exception {
final int CAPACITY = 3;
PeerCache cache = new PeerCache(CAPACITY, 100000);
DatanodeID dnIds[] = new DatanodeID[CAPACITY + 1];
FakePeer peers[] = new FakePeer[CAPACITY + 1];
for (int i = 0; i < dnIds.length; ++i) {
dnIds[i] = new DatanodeID("192.168.0.1",
"fakehostname_" + i, "fake_datanode_id_" + i,
100, 101, 102, 103);
peers[i] = new FakePeer(dnIds[i], false);
}
for (int i = 0; i < CAPACITY; ++i) {
cache.put(dnIds[i], peers[i]);
}
// Check that the peers are cached
assertEquals(CAPACITY, cache.size());
// Add another entry and check that the first entry was evicted
cache.put(dnIds[CAPACITY], peers[CAPACITY]);
assertEquals(CAPACITY, cache.size());
assertSame(null, cache.get(dnIds[0], false));
// Make sure that the other entries are still there
for (int i = 1; i < CAPACITY; ++i) {
Peer peer = cache.get(dnIds[i], false);
assertSame(peers[i], peer);
assertTrue(!peer.isClosed());
peer.close();
}
assertEquals(1, cache.size());
cache.close();
}
@Test
public void testMultiplePeersWithSameKey() throws Exception {
final int CAPACITY = 3;
PeerCache cache = new PeerCache(CAPACITY, 100000);
DatanodeID dnId = new DatanodeID("192.168.0.1",
"fakehostname", "fake_datanode_id",
100, 101, 102, 103);
HashMultiset<FakePeer> peers = HashMultiset.create(CAPACITY);
for (int i = 0; i < CAPACITY; ++i) {
FakePeer peer = new FakePeer(dnId, false);
peers.add(peer);
cache.put(dnId, peer);
}
// Check that all of the peers ended up in the cache
assertEquals(CAPACITY, cache.size());
while (!peers.isEmpty()) {
Peer peer = cache.get(dnId, false);
assertTrue(peer != null);
assertTrue(!peer.isClosed());
peers.remove(peer);
}
assertEquals(0, cache.size());
cache.close();
}
@Test
public void testDomainSocketPeers() throws Exception {
final int CAPACITY = 3;
PeerCache cache = new PeerCache(CAPACITY, 100000);
DatanodeID dnId = new DatanodeID("192.168.0.1",
"fakehostname", "fake_datanode_id",
100, 101, 102, 103);
HashMultiset<FakePeer> peers = HashMultiset.create(CAPACITY);
for (int i = 0; i < CAPACITY; ++i) {
FakePeer peer = new FakePeer(dnId, i == CAPACITY - 1);
peers.add(peer);
cache.put(dnId, peer);
}
// Check that all of the peers ended up in the cache
assertEquals(CAPACITY, cache.size());
// Test that get(requireDomainPeer=true) finds the peer with the
// domain socket.
Peer peer = cache.get(dnId, true);
assertTrue(peer.getDomainSocket() != null);
peers.remove(peer);
// Test that get(requireDomainPeer=true) returns null when there are
// no more peers with domain sockets.
peer = cache.get(dnId, true);
assertTrue(peer == null);
// Check that all of the other peers ended up in the cache.
while (!peers.isEmpty()) {
peer = cache.get(dnId, false);
assertTrue(peer != null);
assertTrue(!peer.isClosed());
peers.remove(peer);
}
assertEquals(0, cache.size());
cache.close();
}
}
| 8,940 | 29.411565 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.channels.ClosedChannelException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
public class TestClose {
@Test
public void testWriteAfterClose() throws IOException {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.build();
try {
final byte[] data = "foo".getBytes();
FileSystem fs = FileSystem.get(conf);
OutputStream out = fs.create(new Path("/test"));
out.write(data);
out.close();
try {
// Should fail.
out.write(data);
fail("Should not have been able to write more data after file is closed.");
} catch (ClosedChannelException cce) {
// We got the correct exception. Ignoring.
}
// Should succeed. Double closes are OK.
out.close();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 2,018 | 30.061538 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.util.Arrays;
import java.util.UUID;
public class TestEncryptionZonesWithKMS extends TestEncryptionZones {
private MiniKMS miniKMS;
@Override
protected String getKeyProviderURI() {
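    // The MiniKMS URL (e.g. "http://localhost:16000/kms") is rewritten into
    // the provider form "kms://http@localhost:16000/kms" expected by the key
    // provider factory; the host/port shown here is only an example.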
return KMSClientProvider.SCHEME_NAME + "://" +
miniKMS.getKMSUrl().toExternalForm().replace("://", "@");
}
@Before
public void setup() throws Exception {
File kmsDir = new File("target/test-classes/" +
UUID.randomUUID().toString());
Assert.assertTrue(kmsDir.mkdirs());
MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder();
miniKMS = miniKMSBuilder.setKmsConfDir(kmsDir).build();
miniKMS.start();
super.setup();
}
@After
public void teardown() {
super.teardown();
miniKMS.stop();
}
@Override
protected void setProvider() {
}
@Test(timeout = 120000)
public void testCreateEZPopulatesEDEKCache() throws Exception {
final Path zonePath = new Path("/TestEncryptionZone");
fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), false);
dfsAdmin.createEncryptionZone(zonePath, TEST_KEY);
assertTrue(((KMSClientProvider)fs.getClient().getKeyProvider()).
getEncKeyQueueSize(TEST_KEY) > 0);
}
@Test(timeout = 120000)
public void testDelegationToken() throws Exception {
final String renewer = "JobTracker";
UserGroupInformation.createRemoteUser(renewer);
Credentials creds = new Credentials();
Token<?> tokens[] = fs.addDelegationTokens(renewer, creds);
DistributedFileSystem.LOG.debug("Delegation tokens: " +
Arrays.asList(tokens));
Assert.assertEquals(2, tokens.length);
Assert.assertEquals(2, creds.numberOfTokens());
    // If the delegation token already exists, it will not be fetched again
tokens = fs.addDelegationTokens(renewer, creds);
Assert.assertEquals(0, tokens.length);
Assert.assertEquals(2, creds.numberOfTokens());
}
}
| 3,223 | 32.583333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.File;
import java.io.FileFilter;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.common.base.Preconditions;
/**
* Test randomly mixing append, snapshot and truncate operations.
 * Use the local file system to simulate each operation and verify
 * the correctness.
*/
public class TestAppendSnapshotTruncate {
static {
GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL);
}
private static final Log LOG = LogFactory.getLog(TestAppendSnapshotTruncate.class);
private static final int BLOCK_SIZE = 1024;
private static final int DATANODE_NUM = 3;
private static final short REPLICATION = 3;
private static final int FILE_WORKER_NUM = 3;
private static final long TEST_TIME_SECOND = 10;
private static final long TEST_TIMEOUT_SECOND = TEST_TIME_SECOND + 60;
static final int SHORT_HEARTBEAT = 1;
static final String[] EMPTY_STRINGS = {};
static Configuration conf;
static MiniDFSCluster cluster;
static DistributedFileSystem dfs;
@BeforeClass
public static void startUp() throws IOException {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT);
conf.setLong(
DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
cluster = new MiniDFSCluster.Builder(conf)
.format(true)
.numDataNodes(DATANODE_NUM)
.nameNodePort(NameNode.DEFAULT_PORT)
.waitSafeMode(true)
.build();
dfs = cluster.getFileSystem();
}
@AfterClass
public static void tearDown() throws IOException {
if(dfs != null) {
dfs.close();
}
if(cluster != null) {
cluster.shutdown();
}
}
/** Test randomly mixing append, snapshot and truncate operations. */
@Test(timeout=TEST_TIMEOUT_SECOND*1000)
public void testAST() throws Exception {
final String dirPathString = "/dir";
final Path dir = new Path(dirPathString);
dfs.mkdirs(dir);
dfs.allowSnapshot(dir);
final File localDir = new File(
System.getProperty("test.build.data", "target/test/data")
+ dirPathString);
if (localDir.exists()) {
FileUtil.fullyDelete(localDir);
}
localDir.mkdirs();
final DirWorker w = new DirWorker(dir, localDir, FILE_WORKER_NUM);
w.startAllFiles();
w.start();
Worker.sleep(TEST_TIME_SECOND * 1000);
w.stop();
w.stopAllFiles();
w.checkEverything();
}
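  // Illustrative sketch (not part of the original test): the truncate-and-wait
  // pattern used by FileWorker below. dfs.truncate() returns false while block
  // recovery is still in progress, so callers wait until the file is ready.
  // The retry count and sleep interval mirror the values used further down.
  static void truncateAndWait(Path file, long newLength) throws IOException {
    if (!dfs.truncate(file, newLength)) {
      TestFileTruncate.checkBlockRecovery(file, dfs, 100, 300L);
    }
    Assert.assertEquals(newLength, dfs.getFileStatus(file).getLen());
  }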
static final FileFilter FILE_ONLY = new FileFilter() {
@Override
public boolean accept(File f) {
return f.isFile();
}
};
static class DirWorker extends Worker {
final Path dir;
final File localDir;
final FileWorker[] files;
private Map<String, Path> snapshotPaths = new HashMap<String, Path>();
private AtomicInteger snapshotCount = new AtomicInteger();
DirWorker(Path dir, File localDir, int nFiles) throws IOException {
super(dir.getName());
this.dir = dir;
this.localDir = localDir;
this.files = new FileWorker[nFiles];
for(int i = 0; i < files.length; i++) {
files[i] = new FileWorker(dir, localDir, String.format("file%02d", i));
}
}
static String getSnapshotName(int n) {
return String.format("s%02d", n);
}
String createSnapshot(String snapshot) throws IOException {
final StringBuilder b = new StringBuilder("createSnapshot: ")
.append(snapshot).append(" for ").append(dir);
{
//copy all local files to a sub dir to simulate snapshot.
final File subDir = new File(localDir, snapshot);
Assert.assertFalse(subDir.exists());
subDir.mkdir();
for(File f : localDir.listFiles(FILE_ONLY)) {
FileUtils.copyFile(f, new File(subDir, f.getName()));
}
}
final Path p = dfs.createSnapshot(dir, snapshot);
snapshotPaths.put(snapshot, p);
return b.toString();
}
String checkSnapshot(String snapshot) throws IOException {
final StringBuilder b = new StringBuilder("checkSnapshot: ")
.append(snapshot);
final File subDir = new File(localDir, snapshot);
Assert.assertTrue(subDir.exists());
final File[] localFiles = subDir.listFiles(FILE_ONLY);
final Path p = snapshotPaths.get(snapshot);
final FileStatus[] statuses = dfs.listStatus(p);
Assert.assertEquals(localFiles.length, statuses.length);
b.append(p).append(" vs ").append(subDir).append(", ")
.append(statuses.length).append(" entries");
Arrays.sort(localFiles);
Arrays.sort(statuses);
for(int i = 0; i < statuses.length; i++) {
FileWorker.checkFullFile(statuses[i].getPath(), localFiles[i]);
}
return b.toString();
}
String deleteSnapshot(String snapshot) throws IOException {
final StringBuilder b = new StringBuilder("deleteSnapshot: ")
.append(snapshot).append(" from ").append(dir);
FileUtil.fullyDelete(new File(localDir, snapshot));
dfs.deleteSnapshot(dir, snapshot);
snapshotPaths.remove(snapshot);
return b.toString();
}
@Override
public String call() throws Exception {
final int op = ThreadLocalRandom.current().nextInt(6);
if (op <= 1) {
pauseAllFiles();
try {
final String snapshot = getSnapshotName(snapshotCount.getAndIncrement());
return createSnapshot(snapshot);
} finally {
startAllFiles();
}
} else if (op <= 3) {
final String[] keys = snapshotPaths.keySet().toArray(EMPTY_STRINGS);
if (keys.length == 0) {
return "NO-OP";
}
final String snapshot = keys[ThreadLocalRandom.current()
.nextInt(keys.length)];
final String s = checkSnapshot(snapshot);
if (op == 2) {
return deleteSnapshot(snapshot);
}
return s;
} else {
return "NO-OP";
}
}
void pauseAllFiles() {
for(FileWorker f : files) {
f.pause();
}
for(int i = 0; i < files.length; ) {
sleep(100);
for(; i < files.length && files[i].isPaused(); i++);
}
}
void startAllFiles() {
for(FileWorker f : files) {
f.start();
}
}
void stopAllFiles() throws InterruptedException {
for(FileWorker f : files) {
f.stop();
}
}
void checkEverything() throws IOException {
LOG.info("checkEverything");
for(FileWorker f : files) {
f.checkFullFile();
f.checkErrorState();
}
for(String snapshot : snapshotPaths.keySet()) {
checkSnapshot(snapshot);
}
checkErrorState();
}
}
static class FileWorker extends Worker {
final Path file;
final File localFile;
FileWorker(Path dir, File localDir, String filename) throws IOException {
super(filename);
this.file = new Path(dir, filename);
this.localFile = new File(localDir, filename);
localFile.createNewFile();
dfs.create(file, false, 4096, REPLICATION, BLOCK_SIZE).close();
}
@Override
public String call() throws IOException {
final int op = ThreadLocalRandom.current().nextInt(9);
if (op == 0) {
return checkFullFile();
} else {
final int nBlocks = ThreadLocalRandom.current().nextInt(4) + 1;
final int lastBlockSize = ThreadLocalRandom.current()
.nextInt(BLOCK_SIZE) + 1;
final int nBytes = nBlocks*BLOCK_SIZE + lastBlockSize;
if (op <= 4) {
return append(nBytes);
} else if (op <= 6) {
return truncateArbitrarily(nBytes);
} else {
return truncateToBlockBoundary(nBlocks);
}
}
}
String append(int n) throws IOException {
final StringBuilder b = new StringBuilder("append ")
.append(n).append(" bytes to ").append(file.getName());
final byte[] bytes = new byte[n];
ThreadLocalRandom.current().nextBytes(bytes);
{ // write to local file
final FileOutputStream out = new FileOutputStream(localFile, true);
out.write(bytes, 0, bytes.length);
out.close();
}
{
final FSDataOutputStream out = dfs.append(file);
out.write(bytes, 0, bytes.length);
out.close();
}
return b.toString();
}
String truncateArbitrarily(int nBytes) throws IOException {
Preconditions.checkArgument(nBytes > 0);
final int length = checkLength();
final StringBuilder b = new StringBuilder("truncateArbitrarily: ")
.append(nBytes).append(" bytes from ").append(file.getName())
.append(", length=" + length);
truncate(length > nBytes? length - nBytes: 0, b);
return b.toString();
}
String truncateToBlockBoundary(int nBlocks) throws IOException {
Preconditions.checkArgument(nBlocks > 0);
final int length = checkLength();
final StringBuilder b = new StringBuilder("truncateToBlockBoundary: ")
.append(nBlocks).append(" blocks from ").append(file.getName())
.append(", length=" + length);
final int n = (nBlocks - 1)*BLOCK_SIZE + (length%BLOCK_SIZE);
Preconditions.checkState(truncate(length > n? length - n: 0, b), b);
return b.toString();
}
private boolean truncate(long newLength, StringBuilder b) throws IOException {
final RandomAccessFile raf = new RandomAccessFile(localFile, "rw");
raf.setLength(newLength);
raf.close();
final boolean isReady = dfs.truncate(file, newLength);
b.append(", newLength=").append(newLength)
.append(", isReady=").append(isReady);
if (!isReady) {
TestFileTruncate.checkBlockRecovery(file, dfs, 100, 300L);
}
return isReady;
}
int checkLength() throws IOException {
return checkLength(file, localFile);
}
static int checkLength(Path file, File localFile) throws IOException {
final long length = dfs.getFileStatus(file).getLen();
Assert.assertEquals(localFile.length(), length);
Assert.assertTrue(length <= Integer.MAX_VALUE);
return (int)length;
}
String checkFullFile() throws IOException {
return checkFullFile(file, localFile);
}
static String checkFullFile(Path file, File localFile) throws IOException {
final StringBuilder b = new StringBuilder("checkFullFile: ")
.append(file.getName()).append(" vs ").append(localFile);
final byte[] bytes = new byte[checkLength(file, localFile)];
b.append(", length=").append(bytes.length);
final FileInputStream in = new FileInputStream(localFile);
for(int n = 0; n < bytes.length; ) {
n += in.read(bytes, n, bytes.length - n);
}
in.close();
AppendTestUtil.checkFullFile(dfs, file, bytes.length, bytes,
"File content mismatch: " + b, false);
return b.toString();
}
}
static abstract class Worker implements Callable<String> {
enum State {
IDLE(false), RUNNING(false), STOPPED(true), ERROR(true);
final boolean isTerminated;
State(boolean isTerminated) {
this.isTerminated = isTerminated;
}
};
final String name;
final AtomicReference<State> state = new AtomicReference<State>(State.IDLE);
final AtomicBoolean isCalling = new AtomicBoolean();
final AtomicReference<Thread> thread = new AtomicReference<Thread>();
private Throwable thrown = null;
Worker(String name) {
this.name = name;
}
State checkErrorState() {
final State s = state.get();
if (s == State.ERROR) {
throw new IllegalStateException(name + " has " + s, thrown);
}
return s;
}
void setErrorState(Throwable t) {
checkErrorState();
LOG.error("Worker " + name + " failed.", t);
state.set(State.ERROR);
thrown = t;
}
void start() {
Preconditions.checkState(state.compareAndSet(State.IDLE, State.RUNNING));
if (thread.get() == null) {
final Thread t = new Thread(null, new Runnable() {
@Override
public void run() {
for(State s; !(s = checkErrorState()).isTerminated;) {
if (s == State.RUNNING) {
isCalling.set(true);
try {
LOG.info(call());
} catch(Throwable t) {
setErrorState(t);
return;
}
isCalling.set(false);
}
sleep(ThreadLocalRandom.current().nextInt(100) + 50);
}
}
}, name);
Preconditions.checkState(thread.compareAndSet(null, t));
t.start();
}
}
boolean isPaused() {
final State s = checkErrorState();
if (s == State.STOPPED) {
throw new IllegalStateException(name + " is " + s);
}
return s == State.IDLE && !isCalling.get();
}
void pause() {
Preconditions.checkState(state.compareAndSet(State.RUNNING, State.IDLE));
}
void stop() throws InterruptedException {
checkErrorState();
state.set(State.STOPPED);
thread.get().join();
}
static void sleep(final long sleepTimeMs) {
try {
Thread.sleep(sleepTimeMs);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
}
}
| 15,538 | 30.202811 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.Test;
public class TestLeaseRecovery {
static final int BLOCK_SIZE = 1024;
static final short REPLICATION_NUM = (short)3;
private static final long LEASE_PERIOD = 300L;
private MiniDFSCluster cluster;
@After
public void shutdown() throws IOException {
if (cluster != null) {
cluster.shutdown();
}
}
static void checkMetaInfo(ExtendedBlock b, DataNode dn
) throws IOException {
TestInterDatanodeProtocol.checkMetaInfo(b, dn);
}
static int min(Integer... x) {
int m = x[0];
for(int i = 1; i < x.length; i++) {
if (x[i] < m) {
m = x[i];
}
}
return m;
}
void waitLeaseRecovery(MiniDFSCluster cluster) {
cluster.setLeasePeriod(LEASE_PERIOD, LEASE_PERIOD);
// wait for the lease to expire
try {
Thread.sleep(2 * 3000); // 2 heartbeat intervals
} catch (InterruptedException e) {
}
}
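  // Illustrative sketch (not part of the original test): the client-side
  // polling pattern used further below to wait until a lease has been
  // recovered; the one-second sleep and retry budget are arbitrary.
  static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path p,
      int maxAttempts) throws Exception {
    for (int i = 0; i < maxAttempts; i++) {
      if (dfs.recoverLease(p)) {
        return true;
      }
      Thread.sleep(1000);
    }
    return false;
  }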
/**
* The following test first creates a file with a few blocks.
* It randomly truncates the replica of the last block stored in each datanode.
   * Finally, it triggers block synchronization to synchronize all stored replicas.
*/
@Test
public void testBlockSynchronization() throws Exception {
final int ORG_FILE_SIZE = 3000;
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
cluster.waitActive();
//create a file
DistributedFileSystem dfs = cluster.getFileSystem();
String filestr = "/foo";
Path filepath = new Path(filestr);
DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
assertTrue(dfs.exists(filepath));
DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);
//get block info for the last block
LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
dfs.dfs.getNamenode(), filestr);
DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
assertEquals(REPLICATION_NUM, datanodeinfos.length);
//connect to data nodes
DataNode[] datanodes = new DataNode[REPLICATION_NUM];
for(int i = 0; i < REPLICATION_NUM; i++) {
datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
assertTrue(datanodes[i] != null);
}
//verify Block Info
ExtendedBlock lastblock = locatedblock.getBlock();
DataNode.LOG.info("newblocks=" + lastblock);
for(int i = 0; i < REPLICATION_NUM; i++) {
checkMetaInfo(lastblock, datanodes[i]);
}
DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName,
new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
// expire lease to trigger block recovery.
waitLeaseRecovery(cluster);
Block[] updatedmetainfo = new Block[REPLICATION_NUM];
long oldSize = lastblock.getNumBytes();
lastblock = TestInterDatanodeProtocol.getLastLocatedBlock(
dfs.dfs.getNamenode(), filestr).getBlock();
long currentGS = lastblock.getGenerationStamp();
for(int i = 0; i < REPLICATION_NUM; i++) {
updatedmetainfo[i] = DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(
lastblock.getBlockPoolId(), lastblock.getBlockId());
assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
}
// verify that lease recovery does not occur when namenode is in safemode
System.out.println("Testing that lease recovery cannot happen during safemode.");
filestr = "/foo.safemode";
filepath = new Path(filestr);
dfs.create(filepath, (short)1);
cluster.getNameNodeRpc().setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
assertTrue(dfs.dfs.exists(filestr));
DFSTestUtil.waitReplication(dfs, filepath, (short)1);
waitLeaseRecovery(cluster);
// verify that we still cannot recover the lease
LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
cluster.getNameNodeRpc().setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
}
/**
   * Block recovery when the meta file does not have CRCs for all chunks in
   * the block file.
*/
@Test
public void testBlockRecoveryWithLessMetafile() throws Exception {
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
UserGroupInformation.getCurrentUser().getShortUserName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
Path file = new Path("/testRecoveryFile");
DistributedFileSystem dfs = cluster.getFileSystem();
FSDataOutputStream out = dfs.create(file);
int count = 0;
while (count < 2 * 1024 * 1024) {
out.writeBytes("Data");
count += 4;
}
out.hsync();
// abort the original stream
((DFSOutputStream) out.getWrappedStream()).abort();
LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
file.toString(), 0, count);
ExtendedBlock block = locations.get(0).getBlock();
DataNode dn = cluster.getDataNodes().get(0);
BlockLocalPathInfo localPathInfo = dn.getBlockLocalPathInfo(block, null);
File metafile = new File(localPathInfo.getMetaPath());
assertTrue(metafile.exists());
// reduce the block meta file size
RandomAccessFile raf = new RandomAccessFile(metafile, "rw");
raf.setLength(metafile.length() - 20);
raf.close();
// restart DN to make replica to RWR
DataNodeProperties dnProp = cluster.stopDataNode(0);
cluster.restartDataNode(dnProp, true);
// try to recover the lease
DistributedFileSystem newdfs = (DistributedFileSystem) FileSystem
.newInstance(cluster.getConfiguration(0));
count = 0;
while (++count < 10 && !newdfs.recoverLease(file)) {
Thread.sleep(1000);
}
assertTrue("File should be closed", newdfs.recoverLease(file));
}
/**
* Recover the lease on a file and append file from another client.
*/
@Test
public void testLeaseRecoveryAndAppend() throws Exception {
Configuration conf = new Configuration();
try{
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
Path file = new Path("/testLeaseRecovery");
DistributedFileSystem dfs = cluster.getFileSystem();
// create a file with 0 bytes
FSDataOutputStream out = dfs.create(file);
out.hflush();
out.hsync();
// abort the original stream
((DFSOutputStream) out.getWrappedStream()).abort();
DistributedFileSystem newdfs =
(DistributedFileSystem) FileSystem.newInstance
(cluster.getConfiguration(0));
      // Appending to a file whose lease is held by another client should fail
try {
newdfs.append(file);
fail("Append to a file(lease is held by another client) should fail");
} catch (RemoteException e) {
assertTrue(e.getMessage().contains("file lease is currently owned"));
}
// Lease recovery on first try should be successful
boolean recoverLease = newdfs.recoverLease(file);
assertTrue(recoverLease);
FSDataOutputStream append = newdfs.append(file);
append.write("test".getBytes());
append.close();
}finally{
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
}
}
| 9,797 | 36.396947 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Test;
/**
 * This test covers privilege-related aspects of FsShell.
*
*/
public class TestFsShellPermission {
static private final String TEST_ROOT = "/testroot";
static UserGroupInformation createUGI(String ownername, String groupName) {
return UserGroupInformation.createUserForTesting(ownername,
new String[]{groupName});
}
private class FileEntry {
private String path;
private boolean isDir;
private String owner;
private String group;
private String permission;
public FileEntry(String path, boolean isDir,
String owner, String group, String permission) {
this.path = path;
this.isDir = isDir;
this.owner = owner;
this.group = group;
this.permission = permission;
}
String getPath() { return path; }
boolean isDirectory() { return isDir; }
String getOwner() { return owner; }
String getGroup() { return group; }
String getPermission() { return permission; }
}
private void createFiles(FileSystem fs, String topdir,
FileEntry[] entries) throws IOException {
for (FileEntry entry : entries) {
String newPathStr = topdir + "/" + entry.getPath();
Path newPath = new Path(newPathStr);
if (entry.isDirectory()) {
fs.mkdirs(newPath);
} else {
FileSystemTestHelper.createFile(fs, newPath);
}
fs.setPermission(newPath, new FsPermission(entry.getPermission()));
fs.setOwner(newPath, entry.getOwner(), entry.getGroup());
}
}
  /** Delete a directory and everything underneath it. */
private static void deldir(FileSystem fs, String topdir) throws IOException {
fs.delete(new Path(topdir), true);
}
static String execCmd(FsShell shell, final String[] args) throws Exception {
ByteArrayOutputStream baout = new ByteArrayOutputStream();
PrintStream out = new PrintStream(baout, true);
PrintStream old = System.out;
System.setOut(out);
int ret = shell.run(args);
out.close();
System.setOut(old);
return String.valueOf(ret);
}
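  // Illustrative sketch (not part of the original test): running a single
  // FsShell command as a particular user; the user and group names passed in
  // are arbitrary examples.
  static String runShellAsUser(final Configuration conf, String user,
      String group, final String... args) throws Exception {
    UserGroupInformation ugi = createUGI(user, group);
    return ugi.doAs(new PrivilegedExceptionAction<String>() {
      public String run() throws Exception {
        return execCmd(new FsShell(conf), args);
      }
    });
  }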
/*
* Each instance of TestDeleteHelper captures one testing scenario.
*
   * It creates all the files listed in fileEntries, and then deletes
   * deleteEntry as user doAsUser with the command and options specified in
   * cmdAndOptions.
*
* When expectedToDelete is true, the deleteEntry is expected to be deleted;
   * otherwise, it is not expected to be deleted. At the end of the test,
   * the existence of deleteEntry is checked against expectedToDelete
   * to ensure the command finished with the expected result.
*/
private class TestDeleteHelper {
private FileEntry[] fileEntries;
private FileEntry deleteEntry;
private String cmdAndOptions;
private boolean expectedToDelete;
final String doAsGroup;
final UserGroupInformation userUgi;
public TestDeleteHelper(
FileEntry[] fileEntries,
FileEntry deleteEntry,
String cmdAndOptions,
String doAsUser,
boolean expectedToDelete) {
this.fileEntries = fileEntries;
this.deleteEntry = deleteEntry;
this.cmdAndOptions = cmdAndOptions;
this.expectedToDelete = expectedToDelete;
doAsGroup = doAsUser.equals("hdfs")? "supergroup" : "users";
userUgi = createUGI(doAsUser, doAsGroup);
}
public void execute(Configuration conf, FileSystem fs) throws Exception {
fs.mkdirs(new Path(TEST_ROOT));
createFiles(fs, TEST_ROOT, fileEntries);
final FsShell fsShell = new FsShell(conf);
final String deletePath = TEST_ROOT + "/" + deleteEntry.getPath();
String[] tmpCmdOpts = StringUtils.split(cmdAndOptions);
ArrayList<String> tmpArray = new ArrayList<String>(Arrays.asList(tmpCmdOpts));
tmpArray.add(deletePath);
final String[] cmdOpts = tmpArray.toArray(new String[tmpArray.size()]);
userUgi.doAs(new PrivilegedExceptionAction<String>() {
public String run() throws Exception {
return execCmd(fsShell, cmdOpts);
}
});
boolean deleted = !fs.exists(new Path(deletePath));
assertEquals(expectedToDelete, deleted);
deldir(fs, TEST_ROOT);
}
}
private TestDeleteHelper genDeleteEmptyDirHelper(final String cmdOpts,
final String targetPerm,
final String asUser,
boolean expectedToDelete) {
FileEntry[] files = {
new FileEntry("userA", true, "userA", "users", "755"),
new FileEntry("userA/userB", true, "userB", "users", targetPerm)
};
FileEntry deleteEntry = files[1];
return new TestDeleteHelper(files, deleteEntry, cmdOpts, asUser,
expectedToDelete);
}
// Expect target to be deleted
private TestDeleteHelper genRmrEmptyDirWithReadPerm() {
return genDeleteEmptyDirHelper("-rm -r", "744", "userA", true);
}
// Expect target to be deleted
private TestDeleteHelper genRmrEmptyDirWithNoPerm() {
return genDeleteEmptyDirHelper("-rm -r", "700", "userA", true);
}
// Expect target to be deleted
private TestDeleteHelper genRmrfEmptyDirWithNoPerm() {
return genDeleteEmptyDirHelper("-rm -r -f", "700", "userA", true);
}
private TestDeleteHelper genDeleteNonEmptyDirHelper(final String cmd,
final String targetPerm,
final String asUser,
boolean expectedToDelete) {
FileEntry[] files = {
new FileEntry("userA", true, "userA", "users", "755"),
new FileEntry("userA/userB", true, "userB", "users", targetPerm),
new FileEntry("userA/userB/xyzfile", false, "userB", "users",
targetPerm)
};
FileEntry deleteEntry = files[1];
return new TestDeleteHelper(files, deleteEntry, cmd, asUser,
expectedToDelete);
}
// Expect target not to be deleted
private TestDeleteHelper genRmrNonEmptyDirWithReadPerm() {
return genDeleteNonEmptyDirHelper("-rm -r", "744", "userA", false);
}
// Expect target not to be deleted
private TestDeleteHelper genRmrNonEmptyDirWithNoPerm() {
return genDeleteNonEmptyDirHelper("-rm -r", "700", "userA", false);
}
// Expect target to be deleted
private TestDeleteHelper genRmrNonEmptyDirWithAllPerm() {
return genDeleteNonEmptyDirHelper("-rm -r", "777", "userA", true);
}
// Expect target not to be deleted
private TestDeleteHelper genRmrfNonEmptyDirWithNoPerm() {
return genDeleteNonEmptyDirHelper("-rm -r -f", "700", "userA", false);
}
// Expect target to be deleted
public TestDeleteHelper genDeleteSingleFileNotAsOwner() throws Exception {
FileEntry[] files = {
new FileEntry("userA", true, "userA", "users", "755"),
new FileEntry("userA/userB", false, "userB", "users", "700")
};
FileEntry deleteEntry = files[1];
return new TestDeleteHelper(files, deleteEntry, "-rm -r", "userA", true);
}
@Test
public void testDelete() throws Exception {
Configuration conf = null;
MiniDFSCluster cluster = null;
try {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
String nnUri = FileSystem.getDefaultUri(conf).toString();
FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
ArrayList<TestDeleteHelper> ta = new ArrayList<TestDeleteHelper>();
// Add empty dir tests
ta.add(genRmrEmptyDirWithReadPerm());
ta.add(genRmrEmptyDirWithNoPerm());
ta.add(genRmrfEmptyDirWithNoPerm());
// Add non-empty dir tests
ta.add(genRmrNonEmptyDirWithReadPerm());
ta.add(genRmrNonEmptyDirWithNoPerm());
ta.add(genRmrNonEmptyDirWithAllPerm());
ta.add(genRmrfNonEmptyDirWithNoPerm());
      // Add single file test
ta.add(genDeleteSingleFileNotAsOwner());
// Run all tests
for(TestDeleteHelper t : ta) {
t.execute(conf, fs);
}
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
}
| 9,376 | 33.098182 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestWrapper;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FileSystemTestWrapper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.apache.hadoop.hdfs.DFSTestUtil.verifyFilesEqual;
import static org.apache.hadoop.hdfs.DFSTestUtil.verifyFilesNotEqual;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.apache.hadoop.test.GenericTestUtils.assertMatches;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
public class TestReservedRawPaths {
private Configuration conf;
private FileSystemTestHelper fsHelper;
private MiniDFSCluster cluster;
private HdfsAdmin dfsAdmin;
private DistributedFileSystem fs;
private final String TEST_KEY = "test_key";
protected FileSystemTestWrapper fsWrapper;
protected FileContextTestWrapper fcWrapper;
@Before
public void setup() throws Exception {
conf = new HdfsConfiguration();
fsHelper = new FileSystemTestHelper();
// Set up java key store
String testRoot = fsHelper.getTestRootDir();
File testRootDir = new File(testRoot).getAbsoluteFile();
final Path jksPath = new Path(testRootDir.toString(), "test.jks");
conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
fs = cluster.getFileSystem();
fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem());
fcWrapper = new FileContextTestWrapper(
FileContext.getFileContext(cluster.getURI(), conf));
dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
// Need to set the client's KeyProvider to the NN's for JKS,
// else the updates do not get flushed properly
fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem()
.getProvider());
DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
@After
public void teardown() {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Basic read/write tests of raw files.
* Create a non-encrypted file
* Create an encryption zone
* Verify that non-encrypted file contents and decrypted file in EZ are equal
* Compare the raw encrypted bytes of the file with the decrypted version to
* ensure they're different
* Compare the raw and non-raw versions of the non-encrypted file to ensure
* they're the same.
*/
@Test(timeout = 120000)
public void testReadWriteRaw() throws Exception {
// Create a base file for comparison
final Path baseFile = new Path("/base");
final int len = 8192;
DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED);
// Create the first enc file
final Path zone = new Path("/zone");
fs.mkdirs(zone);
dfsAdmin.createEncryptionZone(zone, TEST_KEY);
final Path encFile1 = new Path(zone, "myfile");
DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
// Read them back in and compare byte-by-byte
verifyFilesEqual(fs, baseFile, encFile1, len);
// Raw file should be different from encrypted file
final Path encFile1Raw = new Path(zone, "/.reserved/raw/zone/myfile");
verifyFilesNotEqual(fs, encFile1Raw, encFile1, len);
// Raw file should be same as /base which is not in an EZ
final Path baseFileRaw = new Path(zone, "/.reserved/raw/base");
verifyFilesEqual(fs, baseFile, baseFileRaw, len);
}
private void assertPathEquals(Path p1, Path p2) throws IOException {
final FileStatus p1Stat = fs.getFileStatus(p1);
final FileStatus p2Stat = fs.getFileStatus(p2);
/*
* Use accessTime and modificationTime as substitutes for INode to check
* for resolution to the same underlying file.
*/
assertEquals("Access times not equal", p1Stat.getAccessTime(),
p2Stat.getAccessTime());
assertEquals("Modification times not equal", p1Stat.getModificationTime(),
p2Stat.getModificationTime());
assertEquals("pathname1 not equal", p1,
Path.getPathWithoutSchemeAndAuthority(p1Stat.getPath()));
assertEquals("pathname1 not equal", p2,
Path.getPathWithoutSchemeAndAuthority(p2Stat.getPath()));
}
/**
* Tests that getFileStatus on raw and non raw resolve to the same
* file.
*/
@Test(timeout = 120000)
public void testGetFileStatus() throws Exception {
final Path zone = new Path("zone");
final Path slashZone = new Path("/", zone);
fs.mkdirs(slashZone);
dfsAdmin.createEncryptionZone(slashZone, TEST_KEY);
final Path base = new Path("base");
final Path reservedRaw = new Path("/.reserved/raw");
final Path baseRaw = new Path(reservedRaw, base);
final int len = 8192;
DFSTestUtil.createFile(fs, baseRaw, len, (short) 1, 0xFEED);
assertPathEquals(new Path("/", base), baseRaw);
/* Repeat the test for a file in an ez. */
final Path ezEncFile = new Path(slashZone, base);
final Path ezRawEncFile =
new Path(new Path(reservedRaw, zone), base);
DFSTestUtil.createFile(fs, ezEncFile, len, (short) 1, 0xFEED);
assertPathEquals(ezEncFile, ezRawEncFile);
}
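  /* Verify that /.reserved/raw, with and without a trailing slash, resolves to the root. */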
@Test(timeout = 120000)
public void testReservedRoot() throws Exception {
final Path root = new Path("/");
final Path rawRoot = new Path("/.reserved/raw");
final Path rawRootSlash = new Path("/.reserved/raw/");
assertPathEquals(root, rawRoot);
assertPathEquals(root, rawRootSlash);
}
/* Verify mkdir works ok in .reserved/raw directory. */
@Test(timeout = 120000)
public void testReservedRawMkdir() throws Exception {
final Path zone = new Path("zone");
final Path slashZone = new Path("/", zone);
fs.mkdirs(slashZone);
dfsAdmin.createEncryptionZone(slashZone, TEST_KEY);
final Path rawRoot = new Path("/.reserved/raw");
final Path dir1 = new Path("dir1");
final Path rawDir1 = new Path(rawRoot, dir1);
fs.mkdirs(rawDir1);
assertPathEquals(rawDir1, new Path("/", dir1));
fs.delete(rawDir1, true);
final Path rawZone = new Path(rawRoot, zone);
final Path rawDir1EZ = new Path(rawZone, dir1);
fs.mkdirs(rawDir1EZ);
assertPathEquals(rawDir1EZ, new Path(slashZone, dir1));
fs.delete(rawDir1EZ, true);
}
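  /* Verify that ".." components inside /.reserved/raw paths resolve correctly. */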
@Test(timeout = 120000)
public void testRelativePathnames() throws Exception {
final Path baseFileRaw = new Path("/.reserved/raw/base");
final int len = 8192;
DFSTestUtil.createFile(fs, baseFileRaw, len, (short) 1, 0xFEED);
final Path root = new Path("/");
final Path rawRoot = new Path("/.reserved/raw");
assertPathEquals(root, new Path(rawRoot, "../raw"));
assertPathEquals(root, new Path(rawRoot, "../../.reserved/raw"));
assertPathEquals(baseFileRaw, new Path(rawRoot, "../raw/base"));
assertPathEquals(baseFileRaw, new Path(rawRoot,
"../../.reserved/raw/base"));
assertPathEquals(baseFileRaw, new Path(rawRoot,
"../../.reserved/raw/base/../base"));
assertPathEquals(baseFileRaw, new Path(
"/.reserved/../.reserved/raw/../raw/base"));
}
@Test(timeout = 120000)
public void testAdminAccessOnly() throws Exception {
final Path zone = new Path("zone");
final Path slashZone = new Path("/", zone);
fs.mkdirs(slashZone);
dfsAdmin.createEncryptionZone(slashZone, TEST_KEY);
final Path base = new Path("base");
final Path reservedRaw = new Path("/.reserved/raw");
final int len = 8192;
/* Test failure of create file in reserved/raw as non admin */
final UserGroupInformation user = UserGroupInformation.
createUserForTesting("user", new String[] { "mygroup" });
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final DistributedFileSystem fs = cluster.getFileSystem();
try {
final Path ezRawEncFile =
new Path(new Path(reservedRaw, zone), base);
DFSTestUtil.createFile(fs, ezRawEncFile, len, (short) 1, 0xFEED);
fail("access to /.reserved/raw is superuser-only operation");
} catch (AccessControlException e) {
assertExceptionContains("Superuser privilege is required", e);
}
return null;
}
});
/* Test failure of getFileStatus in reserved/raw as non admin */
final Path ezRawEncFile = new Path(new Path(reservedRaw, zone), base);
DFSTestUtil.createFile(fs, ezRawEncFile, len, (short) 1, 0xFEED);
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final DistributedFileSystem fs = cluster.getFileSystem();
try {
fs.getFileStatus(ezRawEncFile);
fail("access to /.reserved/raw is superuser-only operation");
} catch (AccessControlException e) {
assertExceptionContains("Superuser privilege is required", e);
}
return null;
}
});
/* Test failure of listStatus in reserved/raw as non admin */
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final DistributedFileSystem fs = cluster.getFileSystem();
try {
fs.listStatus(ezRawEncFile);
fail("access to /.reserved/raw is superuser-only operation");
} catch (AccessControlException e) {
assertExceptionContains("Superuser privilege is required", e);
}
return null;
}
});
fs.setPermission(new Path("/"), new FsPermission((short) 0777));
/* Test failure of mkdir in reserved/raw as non admin */
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final DistributedFileSystem fs = cluster.getFileSystem();
final Path d1 = new Path(reservedRaw, "dir1");
try {
fs.mkdirs(d1);
fail("access to /.reserved/raw is superuser-only operation");
} catch (AccessControlException e) {
assertExceptionContains("Superuser privilege is required", e);
}
return null;
}
});
}
@Test(timeout = 120000)
public void testListDotReserved() throws Exception {
// Create a base file for comparison
final Path baseFileRaw = new Path("/.reserved/raw/base");
final int len = 8192;
DFSTestUtil.createFile(fs, baseFileRaw, len, (short) 1, 0xFEED);
/*
* Ensure that you can't list /.reserved. Ever.
*/
try {
fs.listStatus(new Path("/.reserved"));
fail("expected FNFE");
} catch (FileNotFoundException e) {
assertExceptionContains("/.reserved does not exist", e);
}
try {
fs.listStatus(new Path("/.reserved/.inodes"));
fail("expected FNFE");
} catch (FileNotFoundException e) {
assertExceptionContains(
"/.reserved/.inodes does not exist", e);
}
final FileStatus[] fileStatuses = fs.listStatus(new Path("/.reserved/raw"));
assertEquals("expected 1 entry", fileStatuses.length, 1);
assertMatches(fileStatuses[0].getPath().toString(), "/.reserved/raw/base");
}
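  /*
   * Create a three-level directory chain, then walk it down through
   * /.reserved/raw and verify every listed entry stays under the raw prefix.
   */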
@Test(timeout = 120000)
public void testListRecursive() throws Exception {
Path rootPath = new Path("/");
Path p = rootPath;
for (int i = 0; i < 3; i++) {
p = new Path(p, "dir" + i);
fs.mkdirs(p);
}
Path curPath = new Path("/.reserved/raw");
int cnt = 0;
FileStatus[] fileStatuses = fs.listStatus(curPath);
while (fileStatuses != null && fileStatuses.length > 0) {
FileStatus f = fileStatuses[0];
assertMatches(f.getPath().toString(), "/.reserved/raw");
curPath = Path.getPathWithoutSchemeAndAuthority(f.getPath());
cnt++;
fileStatuses = fs.listStatus(curPath);
}
assertEquals(3, cnt);
}
}
| 13,493 | 37.444444 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil.ShortCircuitTestContext;
import org.junit.Test;
public class TestRead {
final private int BLOCK_SIZE = 512;
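  /* Check read() into empty and direct byte buffers at and near the end of the file. */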
private void testEOF(MiniDFSCluster cluster, int fileLength) throws IOException {
FileSystem fs = cluster.getFileSystem();
Path path = new Path("testEOF." + fileLength);
DFSTestUtil.createFile(fs, path, fileLength, (short)1, 0xBEEFBEEF);
FSDataInputStream fis = fs.open(path);
ByteBuffer empty = ByteBuffer.allocate(0);
// A read into an empty bytebuffer at the beginning of the file gives 0.
Assert.assertEquals(0, fis.read(empty));
fis.seek(fileLength);
// A read into an empty bytebuffer at the end of the file gives -1.
Assert.assertEquals(-1, fis.read(empty));
if (fileLength > BLOCK_SIZE) {
fis.seek(fileLength - BLOCK_SIZE + 1);
ByteBuffer dbb = ByteBuffer.allocateDirect(BLOCK_SIZE);
Assert.assertEquals(BLOCK_SIZE - 1, fis.read(dbb));
}
fis.close();
}
@Test(timeout=60000)
public void testEOFWithBlockReaderLocal() throws Exception {
ShortCircuitTestContext testContext =
new ShortCircuitTestContext("testEOFWithBlockReaderLocal");
try {
final Configuration conf = testContext.newConfiguration();
conf.setLong(DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD, BLOCK_SIZE);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(true).build();
testEOF(cluster, 1);
testEOF(cluster, 14);
testEOF(cluster, 10000);
cluster.shutdown();
} finally {
testContext.close();
}
}
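  /* Repeat the EOF checks without short-circuit, so reads go through the remote block reader. */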
@Test(timeout=60000)
public void testEOFWithRemoteBlockReader() throws Exception {
final Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD, BLOCK_SIZE);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(true).build();
testEOF(cluster, 1);
testEOF(cluster, 14);
testEOF(cluster, 10000);
cluster.shutdown();
}
/**
* Regression test for HDFS-7045.
* If deadlock happen, the test will time out.
* @throws Exception
*/
@Test(timeout=60000)
public void testReadReservedPath() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
numDataNodes(1).format(true).build();
try {
FileSystem fs = cluster.getFileSystem();
fs.open(new Path("/.reserved/.inodes/file"));
Assert.fail("Open a non existing file should fail.");
} catch (FileNotFoundException e) {
// Expected
} finally {
cluster.shutdown();
}
}
}
| 3,809 | 34.943396 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS;
import static org.hamcrest.CoreMatchers.equalTo;
import java.io.File;
import java.io.IOException;
import java.nio.channels.ClosedByInterruptException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo;
import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplicaInfo;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.junit.After;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import com.google.common.util.concurrent.Uninterruptibles;
public class TestBlockReaderFactory {
static final Log LOG = LogFactory.getLog(TestBlockReaderFactory.class);
@Before
public void init() {
DomainSocket.disableBindPathValidation();
Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
}
@After
public void cleanup() {
DFSInputStream.tcpReadsDisabledForTesting = false;
BlockReaderFactory.createShortCircuitReplicaInfoCallback = null;
}
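  /**
   * Build a Configuration that enables short-circuit reads over a per-test
   * domain socket path.
   */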
public static Configuration createShortCircuitConf(String testName,
TemporarySocketDirectory sockDir) {
Configuration conf = new Configuration();
conf.set(DFS_CLIENT_CONTEXT, testName);
conf.setLong(DFS_BLOCK_SIZE_KEY, 4096);
conf.set(DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(),
testName + "._PORT").getAbsolutePath());
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
false);
conf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
return conf;
}
/**
* If we have a UNIX domain socket configured,
* and we have dfs.client.domain.socket.data.traffic set to true,
* and short-circuit access fails, we should still be able to pass
* data traffic over the UNIX domain socket. Test this.
*/
@Test(timeout=60000)
public void testFallbackFromShortCircuitToUnixDomainTraffic()
throws Exception {
DFSInputStream.tcpReadsDisabledForTesting = true;
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
// The server is NOT configured with short-circuit local reads;
// the client is. Both support UNIX domain reads.
Configuration clientConf = createShortCircuitConf(
"testFallbackFromShortCircuitToUnixDomainTraffic", sockDir);
clientConf.set(DFS_CLIENT_CONTEXT,
"testFallbackFromShortCircuitToUnixDomainTraffic_clientContext");
clientConf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
Configuration serverConf = new Configuration(clientConf);
serverConf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
cluster.waitActive();
FileSystem dfs = FileSystem.get(cluster.getURI(0), clientConf);
String TEST_FILE = "/test_file";
final int TEST_FILE_LEN = 8193;
final int SEED = 0xFADED;
DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN,
(short)1, SEED);
byte contents[] = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
byte expected[] = DFSTestUtil.
calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents, expected));
cluster.shutdown();
sockDir.close();
}
/**
* Test the case where we have multiple threads waiting on the
* ShortCircuitCache delivering a certain ShortCircuitReplica.
*
* In this case, there should only be one call to
* createShortCircuitReplicaInfo. This one replica should be shared
* by all threads.
*/
@Test(timeout=60000)
public void testMultipleWaitersOnShortCircuitCache()
throws Exception {
final CountDownLatch latch = new CountDownLatch(1);
final AtomicBoolean creationIsBlocked = new AtomicBoolean(true);
final AtomicBoolean testFailed = new AtomicBoolean(false);
DFSInputStream.tcpReadsDisabledForTesting = true;
BlockReaderFactory.createShortCircuitReplicaInfoCallback =
new ShortCircuitCache.ShortCircuitReplicaCreator() {
@Override
public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
Uninterruptibles.awaitUninterruptibly(latch);
if (!creationIsBlocked.compareAndSet(true, false)) {
Assert.fail("there were multiple calls to "
+ "createShortCircuitReplicaInfo. Only one was expected.");
}
return null;
}
};
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
Configuration conf = createShortCircuitConf(
"testMultipleWaitersOnShortCircuitCache", sockDir);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final String TEST_FILE = "/test_file";
final int TEST_FILE_LEN = 4000;
final int SEED = 0xFADED;
final int NUM_THREADS = 10;
DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN,
(short)1, SEED);
Runnable readerRunnable = new Runnable() {
@Override
public void run() {
try {
byte contents[] = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
Assert.assertFalse(creationIsBlocked.get());
byte expected[] = DFSTestUtil.
calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents, expected));
} catch (Throwable e) {
LOG.error("readerRunnable error", e);
testFailed.set(true);
}
}
};
Thread threads[] = new Thread[NUM_THREADS];
for (int i = 0; i < NUM_THREADS; i++) {
threads[i] = new Thread(readerRunnable);
threads[i].start();
}
Thread.sleep(500);
latch.countDown();
for (int i = 0; i < NUM_THREADS; i++) {
Uninterruptibles.joinUninterruptibly(threads[i]);
}
cluster.shutdown();
sockDir.close();
Assert.assertFalse(testFailed.get());
}
/**
* Test the case where we have a failure to complete a short circuit read
* that occurs, and then later on, we have a success.
* Any thread waiting on a cache load should receive the failure (if it
* occurs); however, the failure result should not be cached. We want
* to be able to retry later and succeed.
*/
@Test(timeout=60000)
public void testShortCircuitCacheTemporaryFailure()
throws Exception {
BlockReaderTestUtil.enableBlockReaderFactoryTracing();
final AtomicBoolean replicaCreationShouldFail = new AtomicBoolean(true);
final AtomicBoolean testFailed = new AtomicBoolean(false);
DFSInputStream.tcpReadsDisabledForTesting = true;
BlockReaderFactory.createShortCircuitReplicaInfoCallback =
new ShortCircuitCache.ShortCircuitReplicaCreator() {
@Override
public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
if (replicaCreationShouldFail.get()) {
// Insert a short delay to increase the chance that one client
// thread waits for the other client thread's failure via
// a condition variable.
Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
return new ShortCircuitReplicaInfo();
}
return null;
}
};
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
Configuration conf = createShortCircuitConf(
"testShortCircuitCacheTemporaryFailure", sockDir);
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final String TEST_FILE = "/test_file";
final int TEST_FILE_LEN = 4000;
final int NUM_THREADS = 2;
final int SEED = 0xFADED;
final CountDownLatch gotFailureLatch = new CountDownLatch(NUM_THREADS);
final CountDownLatch shouldRetryLatch = new CountDownLatch(1);
DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN,
(short)1, SEED);
Runnable readerRunnable = new Runnable() {
@Override
public void run() {
try {
// First time should fail.
List<LocatedBlock> locatedBlocks =
cluster.getNameNode().getRpcServer().getBlockLocations(
TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
LocatedBlock lblock = locatedBlocks.get(0); // first block
BlockReader blockReader = null;
try {
blockReader = BlockReaderTestUtil.
getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
Assert.fail("expected getBlockReader to fail the first time.");
} catch (Throwable t) {
Assert.assertTrue("expected to see 'TCP reads were disabled " +
"for testing' in exception " + t, t.getMessage().contains(
"TCP reads were disabled for testing"));
} finally {
if (blockReader != null) blockReader.close(); // keep findbugs happy
}
gotFailureLatch.countDown();
shouldRetryLatch.await();
// Second time should succeed.
try {
blockReader = BlockReaderTestUtil.
getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
} catch (Throwable t) {
LOG.error("error trying to retrieve a block reader " +
"the second time.", t);
throw t;
} finally {
if (blockReader != null) blockReader.close();
}
} catch (Throwable t) {
LOG.error("getBlockReader failure", t);
testFailed.set(true);
}
}
};
Thread threads[] = new Thread[NUM_THREADS];
for (int i = 0; i < NUM_THREADS; i++) {
threads[i] = new Thread(readerRunnable);
threads[i].start();
}
gotFailureLatch.await();
replicaCreationShouldFail.set(false);
shouldRetryLatch.countDown();
for (int i = 0; i < NUM_THREADS; i++) {
Uninterruptibles.joinUninterruptibly(threads[i]);
}
cluster.shutdown();
sockDir.close();
Assert.assertFalse(testFailed.get());
}
/**
* Test that a client which supports short-circuit reads using
* shared memory can fall back to not using shared memory when
* the server doesn't support it.
*/
@Test
public void testShortCircuitReadFromServerWithoutShm() throws Exception {
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
Configuration clientConf = createShortCircuitConf(
"testShortCircuitReadFromServerWithoutShm", sockDir);
Configuration serverConf = new Configuration(clientConf);
serverConf.setInt(
DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
DFSInputStream.tcpReadsDisabledForTesting = true;
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
cluster.waitActive();
clientConf.set(DFS_CLIENT_CONTEXT,
"testShortCircuitReadFromServerWithoutShm_clientContext");
final DistributedFileSystem fs =
(DistributedFileSystem)FileSystem.get(cluster.getURI(0), clientConf);
final String TEST_FILE = "/test_file";
final int TEST_FILE_LEN = 4000;
final int SEED = 0xFADEC;
DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN,
(short)1, SEED);
byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
byte expected[] = DFSTestUtil.
calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents, expected));
final ShortCircuitCache cache =
fs.dfs.getClientContext().getShortCircuitCache();
final DatanodeInfo datanode =
new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
cache.getDfsClientShmManager().visit(new Visitor() {
@Override
public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
throws IOException {
Assert.assertEquals(1, info.size());
PerDatanodeVisitorInfo vinfo = info.get(datanode);
Assert.assertTrue(vinfo.disabled);
Assert.assertEquals(0, vinfo.full.size());
Assert.assertEquals(0, vinfo.notFull.size());
}
});
cluster.shutdown();
sockDir.close();
}
/**
* Test that a client which does not support short-circuit reads using
* shared memory can talk with a server which supports it.
*/
@Test
public void testShortCircuitReadFromClientWithoutShm() throws Exception {
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
Configuration clientConf = createShortCircuitConf(
"testShortCircuitReadWithoutShm", sockDir);
Configuration serverConf = new Configuration(clientConf);
DFSInputStream.tcpReadsDisabledForTesting = true;
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
cluster.waitActive();
clientConf.setInt(
DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
clientConf.set(DFS_CLIENT_CONTEXT,
"testShortCircuitReadFromClientWithoutShm_clientContext");
final DistributedFileSystem fs =
(DistributedFileSystem)FileSystem.get(cluster.getURI(0), clientConf);
final String TEST_FILE = "/test_file";
final int TEST_FILE_LEN = 4000;
final int SEED = 0xFADEC;
DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN,
(short)1, SEED);
byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
byte expected[] = DFSTestUtil.
calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents, expected));
final ShortCircuitCache cache =
fs.dfs.getClientContext().getShortCircuitCache();
Assert.assertEquals(null, cache.getDfsClientShmManager());
cluster.shutdown();
sockDir.close();
}
/**
* Test shutting down the ShortCircuitCache while there are things in it.
*/
@Test
public void testShortCircuitCacheShutdown() throws Exception {
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
Configuration conf = createShortCircuitConf(
"testShortCircuitCacheShutdown", sockDir);
conf.set(DFS_CLIENT_CONTEXT, "testShortCircuitCacheShutdown");
Configuration serverConf = new Configuration(conf);
DFSInputStream.tcpReadsDisabledForTesting = true;
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem fs =
(DistributedFileSystem)FileSystem.get(cluster.getURI(0), conf);
final String TEST_FILE = "/test_file";
final int TEST_FILE_LEN = 4000;
final int SEED = 0xFADEC;
DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN,
(short)1, SEED);
byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
byte expected[] = DFSTestUtil.
calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents, expected));
final ShortCircuitCache cache =
fs.dfs.getClientContext().getShortCircuitCache();
cache.close();
Assert.assertTrue(cache.getDfsClientShmManager().
getDomainSocketWatcher().isClosed());
cluster.shutdown();
sockDir.close();
}
/**
* When an InterruptedException is sent to a thread calling
* FileChannel#read, the FileChannel is immediately closed and the
* thread gets an exception. This effectively means that we might have
* someone asynchronously calling close() on the file descriptors we use
* in BlockReaderLocal. So when unreferencing a ShortCircuitReplica in
* ShortCircuitCache#unref, we should check if the FileChannel objects
* are still open. If not, we should purge the replica to avoid giving
* it out to any future readers.
*
* This is a regression test for HDFS-6227: Short circuit read failed
* due to ClosedChannelException.
*
* Note that you may still get ClosedChannelException errors if two threads
* are reading from the same replica and an InterruptedException is delivered
* to one of them.
*/
@Test(timeout=120000)
public void testPurgingClosedReplicas() throws Exception {
BlockReaderTestUtil.enableBlockReaderFactoryTracing();
final AtomicInteger replicasCreated = new AtomicInteger(0);
final AtomicBoolean testFailed = new AtomicBoolean(false);
DFSInputStream.tcpReadsDisabledForTesting = true;
BlockReaderFactory.createShortCircuitReplicaInfoCallback =
new ShortCircuitCache.ShortCircuitReplicaCreator() {
@Override
public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
replicasCreated.incrementAndGet();
return null;
}
};
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
Configuration conf = createShortCircuitConf(
"testPurgingClosedReplicas", sockDir);
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final String TEST_FILE = "/test_file";
final int TEST_FILE_LEN = 4095;
final int SEED = 0xFADE0;
final DistributedFileSystem fs =
(DistributedFileSystem)FileSystem.get(cluster.getURI(0), conf);
DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN,
(short)1, SEED);
final Semaphore sem = new Semaphore(0);
final List<LocatedBlock> locatedBlocks =
cluster.getNameNode().getRpcServer().getBlockLocations(
TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
final LocatedBlock lblock = locatedBlocks.get(0); // first block
final byte[] buf = new byte[TEST_FILE_LEN];
Runnable readerRunnable = new Runnable() {
@Override
public void run() {
try {
while (true) {
BlockReader blockReader = null;
try {
blockReader = BlockReaderTestUtil.
getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
sem.release();
try {
blockReader.readAll(buf, 0, TEST_FILE_LEN);
} finally {
sem.acquireUninterruptibly();
}
} catch (ClosedByInterruptException e) {
LOG.info("got the expected ClosedByInterruptException", e);
sem.release();
break;
} finally {
if (blockReader != null) blockReader.close();
}
LOG.info("read another " + TEST_FILE_LEN + " bytes.");
}
} catch (Throwable t) {
LOG.error("getBlockReader failure", t);
testFailed.set(true);
sem.release();
}
}
};
Thread thread = new Thread(readerRunnable);
thread.start();
// While the thread is reading, send it interrupts.
// These should trigger a ClosedChannelException.
while (thread.isAlive()) {
sem.acquireUninterruptibly();
thread.interrupt();
sem.release();
}
Assert.assertFalse(testFailed.get());
// We should be able to read from the file without
// getting a ClosedChannelException.
BlockReader blockReader = null;
try {
blockReader = BlockReaderTestUtil.
getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
blockReader.readFully(buf, 0, TEST_FILE_LEN);
} finally {
if (blockReader != null) blockReader.close();
}
byte expected[] = DFSTestUtil.
calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(buf, expected));
// Another ShortCircuitReplica object should have been created.
Assert.assertEquals(2, replicasCreated.get());
dfs.close();
cluster.shutdown();
sockDir.close();
}
}
| 22,092 | 40.295327 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.junit.Assert;
import org.junit.Test;
/** Test the fileLength on cluster restarts */
public class TestFileLengthOnClusterRestart {
/**
   * Tests the file length when we hsync the file, restart the cluster, and
   * the DataNodes have not yet reported to the NameNode.
*/
@Test(timeout = 60000)
public void testFileLengthWithHSyncAndClusterRestartWithOutDNsRegister()
throws Exception {
final Configuration conf = new HdfsConfiguration();
// create cluster
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(2).build();
HdfsDataInputStream in = null;
try {
Path path = new Path("/tmp/TestFileLengthOnClusterRestart", "test");
DistributedFileSystem dfs = cluster
.getFileSystem();
FSDataOutputStream out = dfs.create(path);
int fileLength = 1030;
out.write(new byte[fileLength]);
out.hsync();
cluster.restartNameNode();
cluster.waitActive();
in = (HdfsDataInputStream) dfs.open(path, 1024);
// Verify the length when we just restart NN. DNs will register
// immediately.
Assert.assertEquals(fileLength, in.getVisibleLength());
cluster.shutdownDataNodes();
cluster.restartNameNode(false);
      // This is just to ensure the NN has started.
verifyNNIsInSafeMode(dfs);
try {
in = (HdfsDataInputStream) dfs.open(path);
Assert.fail("Expected IOException");
} catch (IOException e) {
Assert.assertTrue(e.getLocalizedMessage().indexOf(
"Name node is in safe mode") >= 0);
}
} finally {
if (null != in) {
in.close();
}
cluster.shutdown();
}
}
private void verifyNNIsInSafeMode(DistributedFileSystem dfs)
throws IOException {
while (true) {
try {
if (dfs.isInSafeMode()) {
return;
} else {
throw new IOException("Expected to be in SafeMode");
}
} catch (IOException e) {
        // NN might not have started completely yet; ignore and retry.
}
}
}
}
| 4,458 | 45.936842 | 130 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithHA.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
/**
* Tests interaction of encryption zones with HA failover.
*/
public class TestEncryptionZonesWithHA {
private Configuration conf;
private MiniDFSCluster cluster;
private NameNode nn0;
private NameNode nn1;
private DistributedFileSystem fs;
private HdfsAdmin dfsAdmin0;
private HdfsAdmin dfsAdmin1;
private FileSystemTestHelper fsHelper;
private File testRootDir;
private final String TEST_KEY = "test_key";
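  /**
   * Set up an HA mini cluster with a JavaKeyStore-backed key provider and
   * create the test key on both NameNodes.
   */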
@Before
public void setupCluster() throws Exception {
conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
HAUtil.setAllowStandbyReads(conf, true);
fsHelper = new FileSystemTestHelper();
String testRoot = fsHelper.getTestRootDir();
testRootDir = new File(testRoot).getAbsoluteFile();
conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
JavaKeyStoreProvider.SCHEME_NAME + "://file" +
new Path(testRootDir.toString(), "test.jks").toUri()
);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(1)
.build();
cluster.waitActive();
cluster.transitionToActive(0);
fs = (DistributedFileSystem)HATestUtil.configureFailoverFs(cluster, conf);
DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
nn0 = cluster.getNameNode(0);
nn1 = cluster.getNameNode(1);
dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
KeyProviderCryptoExtension nn0Provider =
cluster.getNameNode(0).getNamesystem().getProvider();
fs.getClient().setKeyProvider(nn0Provider);
}
@After
public void shutdownCluster() throws IOException {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Test that encryption zones are properly tracked by the standby.
*/
@Test(timeout = 60000)
public void testEncryptionZonesTrackedOnStandby() throws Exception {
final int len = 8196;
final Path dir = new Path("/enc");
final Path dirChild = new Path(dir, "child");
final Path dirFile = new Path(dir, "file");
fs.mkdir(dir, FsPermission.getDirDefault());
dfsAdmin0.createEncryptionZone(dir, TEST_KEY);
fs.mkdir(dirChild, FsPermission.getDirDefault());
DFSTestUtil.createFile(fs, dirFile, len, (short) 1, 0xFEED);
String contents = DFSTestUtil.readFile(fs, dirFile);
// Failover the current standby to active.
HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
Assert.assertEquals("Got unexpected ez path", dir.toString(),
dfsAdmin1.getEncryptionZoneForPath(dir).getPath().toString());
Assert.assertEquals("Got unexpected ez path", dir.toString(),
dfsAdmin1.getEncryptionZoneForPath(dirChild).getPath().toString());
Assert.assertEquals("File contents after failover were changed",
contents, DFSTestUtil.readFile(fs, dirFile));
}
}
| 4,474 | 35.680328 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.junit.After;
import org.junit.Test;
/**
* This class tests DatanodeDescriptor.getBlocksScheduled() at the
* NameNode. This counter is supposed to keep track of blocks currently
* scheduled to a datanode.
*/
public class TestBlocksScheduledCounter {
MiniDFSCluster cluster = null;
FileSystem fs = null;
@After
public void tearDown() throws IOException {
if (fs != null) {
fs.close();
}
if(cluster!=null){
cluster.shutdown();
}
}
@Test
public void testBlocksScheduledCounter() throws IOException {
cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
cluster.waitActive();
fs = cluster.getFileSystem();
    // open a file and write a few bytes:
FSDataOutputStream out = fs.create(new Path("/testBlockScheduledCounter"));
for (int i=0; i<1024; i++) {
out.write(i);
}
// flush to make sure a block is allocated.
out.hflush();
ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
).getDatanodeManager();
dm.fetchDatanodes(dnList, dnList, false);
DatanodeDescriptor dn = dnList.get(0);
assertEquals(1, dn.getBlocksScheduled());
// close the file and the counter should go to zero.
out.close();
assertEquals(0, dn.getBlocksScheduled());
}
}
| 2,612 | 31.6625 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.io.IOException;
import java.util.EnumMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.util.Holder;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.junit.Test;
/**
* Unit test to make sure that Append properly logs the right
* things to the edit log, such that files aren't lost or truncated
* on restart.
*/
public class TestFileAppendRestart {
private static final int BLOCK_SIZE = 4096;
private static final String HADOOP_23_BROKEN_APPEND_TGZ =
"image-with-buggy-append.tgz";
private void writeAndAppend(FileSystem fs, Path p,
int lengthForCreate, int lengthForAppend) throws IOException {
// Creating a file with 4096 blockSize to write multiple blocks
FSDataOutputStream stream = fs.create(
p, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
try {
AppendTestUtil.write(stream, 0, lengthForCreate);
stream.close();
stream = fs.append(p);
AppendTestUtil.write(stream, lengthForCreate, lengthForAppend);
stream.close();
} finally {
IOUtils.closeStream(stream);
}
int totalLength = lengthForCreate + lengthForAppend;
assertEquals(totalLength, fs.getFileStatus(p).getLen());
}
/**
* Regression test for HDFS-2991. Creates and appends to files
* where blocks start/end on block boundaries.
*/
@Test
public void testAppendRestart() throws Exception {
final Configuration conf = new HdfsConfiguration();
// Turn off persistent IPC, so that the DFSClient can survive NN restart
conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
0);
MiniDFSCluster cluster = null;
FSDataOutputStream stream = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
FileSystem fs = cluster.getFileSystem();
File editLog =
new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0),
NNStorage.getInProgressEditsFileName(1));
EnumMap<FSEditLogOpCodes, Holder<Integer>> counts;
Path p1 = new Path("/block-boundaries");
writeAndAppend(fs, p1, BLOCK_SIZE, BLOCK_SIZE);
counts = FSImageTestUtil.countEditLogOpTypes(editLog);
// OP_ADD to create file
// OP_ADD_BLOCK for first block
// OP_CLOSE to close file
// OP_APPEND to reopen file
// OP_ADD_BLOCK for second block
// OP_CLOSE to close file
assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_APPEND).held);
assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);
Path p2 = new Path("/not-block-boundaries");
writeAndAppend(fs, p2, BLOCK_SIZE/2, BLOCK_SIZE);
counts = FSImageTestUtil.countEditLogOpTypes(editLog);
// OP_ADD to create file
// OP_ADD_BLOCK for first block
// OP_CLOSE to close file
// OP_APPEND to re-establish the lease
// OP_UPDATE_BLOCKS from the updatePipeline call (increments genstamp of last block)
// OP_ADD_BLOCK at the start of the second block
// OP_CLOSE to close file
// Total: 2 OP_ADDs, 1 OP_UPDATE_BLOCKS, 2 OP_ADD_BLOCKs, and 2 OP_CLOSEs
// in addition to the ones above
assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_ADD).held);
assertEquals(2, (int)counts.get(FSEditLogOpCodes.OP_APPEND).held);
assertEquals(1, (int)counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held);
assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
assertEquals(2+2, (int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);
cluster.restartNameNode();
AppendTestUtil.check(fs, p1, 2*BLOCK_SIZE);
AppendTestUtil.check(fs, p2, 3*BLOCK_SIZE/2);
} finally {
IOUtils.closeStream(stream);
if (cluster != null) { cluster.shutdown(); }
}
}
/**
* Earlier versions of HDFS had a bug (HDFS-2991) which caused
* append(), when called exactly at a block boundary,
   * to not log an OP_ADD. This test ensures that we can read from
   * such buggy versions correctly, by loading a namesystem image
   * created with 0.23.1-rc2 that exhibits the issue.
*/
@Test
public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
final Configuration conf = new HdfsConfiguration();
String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+ "/" + HADOOP_23_BROKEN_APPEND_TGZ;
String testDir = PathUtils.getTestDirName(getClass());
File dfsDir = new File(testDir, "image-with-buggy-append");
if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
}
FileUtil.unTar(new File(tarFile), new File(testDir));
File nameDir = new File(dfsDir, "name");
GenericTestUtils.assertExists(nameDir);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.numDataNodes(0)
.waitSafeMode(false)
.startupOption(StartupOption.UPGRADE)
.build();
try {
FileSystem fs = cluster.getFileSystem();
Path testPath = new Path("/tmp/io_data/test_io_0");
assertEquals(2*1024*1024, fs.getFileStatus(testPath).getLen());
} finally {
cluster.shutdown();
}
}
/**
* Test to append to the file, when one of datanode in the existing pipeline
* is down.
*/
@Test
public void testAppendWithPipelineRecovery() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
FSDataOutputStream out = null;
try {
cluster = new MiniDFSCluster.Builder(conf).manageDataDfsDirs(true)
.manageNameDfsDirs(true).numDataNodes(4)
.racks(new String[] { "/rack1", "/rack1", "/rack2", "/rack2" })
.build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
Path path = new Path("/test1");
out = fs.create(path, true, BLOCK_SIZE, (short) 3, BLOCK_SIZE);
AppendTestUtil.write(out, 0, 1024);
out.close();
cluster.stopDataNode(3);
out = fs.append(path);
AppendTestUtil.write(out, 1024, 1024);
out.close();
cluster.restartNameNode(true);
AppendTestUtil.check(fs, path, 2048);
} finally {
IOUtils.closeStream(out);
if (null != cluster) {
cluster.shutdown();
}
}
}
}
| 8,212 | 36.502283 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.ExecutionException;
import com.google.common.base.Supplier;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.mortbay.util.ajax.JSON;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class tests the decommissioning of nodes.
*/
public class TestDecommission {
public static final Logger LOG = LoggerFactory.getLogger(TestDecommission
.class);
static final long seed = 0xDEADBEEFL;
static final int blockSize = 8192;
static final int fileSize = 16384;
static final int HEARTBEAT_INTERVAL = 1; // heartbeat interval in seconds
static final int BLOCKREPORT_INTERVAL_MSEC = 1000; //block report in msec
static final int NAMENODE_REPLICATION_INTERVAL = 1; //replication interval
final Random myrand = new Random();
Path dir;
Path hostsFile;
Path excludeFile;
FileSystem localFileSys;
Configuration conf;
MiniDFSCluster cluster = null;
@Before
public void setup() throws IOException {
conf = new HdfsConfiguration();
// Set up the hosts/exclude files.
localFileSys = FileSystem.getLocal(conf);
Path workingDir = localFileSys.getWorkingDirectory();
dir = new Path(workingDir, PathUtils.getTestDirName(getClass()) + "/work-dir/decommission");
hostsFile = new Path(dir, "hosts");
excludeFile = new Path(dir, "exclude");
// Setup conf
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, BLOCKREPORT_INTERVAL_MSEC);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, NAMENODE_REPLICATION_INTERVAL);
writeConfigFile(hostsFile, null);
writeConfigFile(excludeFile, null);
}
@After
public void teardown() throws IOException {
cleanupFile(localFileSys, dir);
if (cluster != null) {
cluster.shutdown();
}
}
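  /*
   * Write the given hostnames, one per line, to the specified hosts or
   * exclude file, replacing the file if it already exists.
   */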
private void writeConfigFile(Path name, List<String> nodes)
throws IOException {
// delete if it already exists
if (localFileSys.exists(name)) {
localFileSys.delete(name, true);
}
FSDataOutputStream stm = localFileSys.create(name);
if (nodes != null) {
for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
String node = it.next();
stm.writeBytes(node);
stm.writeBytes("\n");
}
}
stm.close();
}
private void writeFile(FileSystem fileSys, Path name, int repl)
throws IOException {
    // create and write a file that contains two full blocks of data
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) repl, blockSize);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
stm.close();
LOG.info("Created file " + name + " with " + repl + " replicas.");
}
/**
   * Verify that the number of replicas is as expected for each block in
* the given file.
* For blocks with a decommissioned node, verify that their replication
* is 1 more than what is specified.
* For blocks without decommissioned nodes, verify their replication is
* equal to what is specified.
*
* @param downnode - if null, there is no decommissioned node for this file.
* @return - null if no failure found, else an error message string.
*/
private static String checkFile(FileSystem fileSys, Path name, int repl,
String downnode, int numDatanodes) throws IOException {
boolean isNodeDown = (downnode != null);
// need a raw stream
assertTrue("Not HDFS:"+fileSys.getUri(),
fileSys instanceof DistributedFileSystem);
HdfsDataInputStream dis = (HdfsDataInputStream)
fileSys.open(name);
Collection<LocatedBlock> dinfo = dis.getAllBlocks();
for (LocatedBlock blk : dinfo) { // for each block
int hasdown = 0;
DatanodeInfo[] nodes = blk.getLocations();
for (int j = 0; j < nodes.length; j++) { // for each replica
if (isNodeDown && nodes[j].getXferAddr().equals(downnode)) {
hasdown++;
//Downnode must actually be decommissioned
if (!nodes[j].isDecommissioned()) {
return "For block " + blk.getBlock() + " replica on " +
nodes[j] + " is given as downnode, " +
"but is not decommissioned";
}
//Decommissioned node (if any) should only be last node in list.
if (j != nodes.length - 1) {
return "For block " + blk.getBlock() + " decommissioned node "
+ nodes[j] + " was not last node in list: "
+ (j + 1) + " of " + nodes.length;
}
LOG.info("Block " + blk.getBlock() + " replica on " +
nodes[j] + " is decommissioned.");
} else {
//Non-downnodes must not be decommissioned
if (nodes[j].isDecommissioned()) {
return "For block " + blk.getBlock() + " replica on " +
nodes[j] + " is unexpectedly decommissioned";
}
}
}
LOG.info("Block " + blk.getBlock() + " has " + hasdown
+ " decommissioned replica.");
if(Math.min(numDatanodes, repl+hasdown) != nodes.length) {
return "Wrong number of replicas for block " + blk.getBlock() +
": " + nodes.length + ", expected " +
Math.min(numDatanodes, repl+hasdown);
}
}
return null;
}
private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
assertTrue(fileSys.exists(name));
fileSys.delete(name, true);
assertTrue(!fileSys.exists(name));
}
  /*
   * Decommission the DN with the given datanodeUuid, or one random node if
   * datanodeUuid is null, and wait for the node to reach the given
   * {@code waitForState}.
   */
private DatanodeInfo decommissionNode(int nnIndex,
String datanodeUuid,
ArrayList<DatanodeInfo>decommissionedNodes,
AdminStates waitForState)
throws IOException {
DFSClient client = getDfsClient(cluster.getNameNode(nnIndex), conf);
DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
//
// pick one datanode randomly unless the caller specifies one.
//
int index = 0;
if (datanodeUuid == null) {
boolean found = false;
while (!found) {
index = myrand.nextInt(info.length);
if (!info[index].isDecommissioned()) {
found = true;
}
}
} else {
// The caller specifies a DN
for (; index < info.length; index++) {
if (info[index].getDatanodeUuid().equals(datanodeUuid)) {
break;
}
}
if (index == info.length) {
throw new IOException("invalid datanodeUuid " + datanodeUuid);
}
}
String nodename = info[index].getXferAddr();
LOG.info("Decommissioning node: " + nodename);
// write nodename into the exclude file.
ArrayList<String> nodes = new ArrayList<String>();
if (decommissionedNodes != null) {
for (DatanodeInfo dn : decommissionedNodes) {
nodes.add(dn.getName());
}
}
nodes.add(nodename);
writeConfigFile(excludeFile, nodes);
refreshNodes(cluster.getNamesystem(nnIndex), conf);
DatanodeInfo ret = NameNodeAdapter.getDatanode(
cluster.getNamesystem(nnIndex), info[index]);
waitNodeState(ret, waitForState);
return ret;
}
  /* Ask a specific NN to stop decommissioning the datanode and wait for it
   * to return to the NORMAL state.
   */
private void recommissionNode(int nnIndex, DatanodeInfo decommissionedNode) throws IOException {
LOG.info("Recommissioning node: " + decommissionedNode);
writeConfigFile(excludeFile, null);
refreshNodes(cluster.getNamesystem(nnIndex), conf);
waitNodeState(decommissionedNode, AdminStates.NORMAL);
}
  /*
   * Wait till the node reaches the given admin state.
   */
private void waitNodeState(DatanodeInfo node,
AdminStates state) {
boolean done = state == node.getAdminState();
while (!done) {
LOG.info("Waiting for node " + node + " to change state to "
+ state + " current state: " + node.getAdminState());
try {
Thread.sleep(HEARTBEAT_INTERVAL * 500);
} catch (InterruptedException e) {
// nothing
}
done = state == node.getAdminState();
}
LOG.info("node " + node + " reached the state " + state);
}
/* Get DFSClient to the namenode */
private static DFSClient getDfsClient(NameNode nn,
Configuration conf) throws IOException {
return new DFSClient(nn.getNameNodeAddress(), conf);
}
/* Validate cluster has expected number of datanodes */
private static void validateCluster(DFSClient client, int numDNs)
throws IOException {
DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
assertEquals("Number of Datanodes ", numDNs, info.length);
}
/** Start a MiniDFSCluster
* @throws IOException */
private void startCluster(int numNameNodes, int numDatanodes,
Configuration conf) throws IOException {
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(numNameNodes))
.numDataNodes(numDatanodes).build();
cluster.waitActive();
for (int i = 0; i < numNameNodes; i++) {
DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
validateCluster(client, numDatanodes);
}
}
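  /* Re-read the hosts include and exclude files for the given namesystem. */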
static void refreshNodes(final FSNamesystem ns, final Configuration conf
) throws IOException {
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
}
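  /*
   * Verify over several heartbeats that the namenode's capacity, used and
   * remaining stats and total load match the datanode's report, accounting
   * for whether the node is decommissioning.
   */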
private void verifyStats(NameNode namenode, FSNamesystem fsn,
DatanodeInfo info, DataNode node, boolean decommissioning)
throws InterruptedException, IOException {
// Do the stats check over 10 heartbeats
for (int i = 0; i < 10; i++) {
long[] newStats = namenode.getRpcServer().getStats();
// For decommissioning nodes, ensure capacity of the DN is no longer
// counted. Only used space of the DN is counted in cluster capacity
assertEquals(newStats[0],
decommissioning ? info.getDfsUsed() : info.getCapacity());
// Ensure cluster used capacity is counted for both normal and
// decommissioning nodes
assertEquals(newStats[1], info.getDfsUsed());
// For decommissioning nodes, remaining space from the DN is not counted
assertEquals(newStats[2], decommissioning ? 0 : info.getRemaining());
// Ensure transceiver count is same as that DN
assertEquals(fsn.getTotalLoad(), info.getXceiverCount());
DataNodeTestUtils.triggerHeartbeat(node);
}
}
  /**
   * Tests decommission for a non-federated cluster.
   */
@Test(timeout=360000)
public void testDecommission() throws IOException {
testDecommission(1, 6);
}
  /**
   * Tests decommission when the replicas on the target datanode cannot be
   * migrated to other datanodes to satisfy the replication factor. Make sure
   * the datanode won't get stuck in the decommissioning state.
   */
@Test(timeout = 360000)
public void testDecommission2() throws IOException {
LOG.info("Starting test testDecommission");
int numNamenodes = 1;
int numDatanodes = 4;
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
startCluster(numNamenodes, numDatanodes, conf);
ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList = new ArrayList<ArrayList<DatanodeInfo>>(
numNamenodes);
namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(numDatanodes));
Path file1 = new Path("testDecommission2.dat");
int replicas = 4;
// Start decommissioning one namenode at a time
ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(0);
FileSystem fileSys = cluster.getFileSystem(0);
FSNamesystem ns = cluster.getNamesystem(0);
writeFile(fileSys, file1, replicas);
int deadDecomissioned = ns.getNumDecomDeadDataNodes();
int liveDecomissioned = ns.getNumDecomLiveDataNodes();
// Decommission one node. Verify that node is decommissioned.
DatanodeInfo decomNode = decommissionNode(0, null, decommissionedNodes,
AdminStates.DECOMMISSIONED);
decommissionedNodes.add(decomNode);
assertEquals(deadDecomissioned, ns.getNumDecomDeadDataNodes());
assertEquals(liveDecomissioned + 1, ns.getNumDecomLiveDataNodes());
// Ensure decommissioned datanode is not automatically shutdown
DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
assertEquals("All datanodes must be alive", numDatanodes,
client.datanodeReport(DatanodeReportType.LIVE).length);
assertNull(checkFile(fileSys, file1, replicas, decomNode.getXferAddr(),
numDatanodes));
cleanupFile(fileSys, file1);
// Restart the cluster and ensure recommissioned datanodes
// are allowed to register with the namenode
cluster.shutdown();
startCluster(1, 4, conf);
cluster.shutdown();
}
  /**
   * Tests decommission for a federated cluster.
   */
@Test(timeout=360000)
public void testDecommissionFederation() throws IOException {
testDecommission(2, 2);
}
/**
* Test decommission process on standby NN.
* Verify admins can run "dfsadmin -refreshNodes" on SBN and decomm
* process can finish as long as admins run "dfsadmin -refreshNodes"
* on active NN.
   * The SBN used to mark an excess replica upon recommission. The SBN's pick
   * for the excess replica could be different from the one picked by the ANN.
   * That creates an inconsistent state and prevents the SBN from finishing
   * decommission.
*/
@Test(timeout=360000)
public void testDecommissionOnStandby() throws Exception {
Configuration hdfsConf = new HdfsConfiguration(conf);
hdfsConf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
hdfsConf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 30000);
hdfsConf.setInt(DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_KEY, 2);
// The time to wait so that the slow DN's heartbeat is considered old
// by BlockPlacementPolicyDefault and thus will choose that DN for
// excess replica.
long slowHeartbeatDNwaitTime =
hdfsConf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000 * (hdfsConf.getInt(
DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_KEY,
DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT) + 1);
cluster = new MiniDFSCluster.Builder(hdfsConf)
.nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build();
cluster.transitionToActive(0);
cluster.waitActive();
// Step 1, create a cluster with 4 DNs. Blocks are stored on the first 3 DNs.
// The last DN is empty. Also configure the last DN to have slow heartbeat
// so that it will be chosen as excess replica candidate during recommission.
// Step 1.a, copy blocks to the first 3 DNs. Given the replica count is the
// same as # of DNs, each DN will have a replica for any block.
Path file1 = new Path("testDecommissionHA.dat");
int replicas = 3;
FileSystem activeFileSys = cluster.getFileSystem(0);
writeFile(activeFileSys, file1, replicas);
HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
cluster.getNameNode(1));
// Step 1.b, start a DN with slow heartbeat, so that we can know for sure it
// will be chosen as the target of excess replica during recommission.
hdfsConf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 30);
cluster.startDataNodes(hdfsConf, 1, true, null, null, null);
DataNode lastDN = cluster.getDataNodes().get(3);
lastDN.getDatanodeUuid();
// Step 2, decommission the first DN at both ANN and SBN.
DataNode firstDN = cluster.getDataNodes().get(0);
// Step 2.a, ask ANN to decomm the first DN
DatanodeInfo decommissionedNodeFromANN = decommissionNode(
0, firstDN.getDatanodeUuid(), null, AdminStates.DECOMMISSIONED);
// Step 2.b, ask SBN to decomm the first DN
DatanodeInfo decomNodeFromSBN = decommissionNode(1, firstDN.getDatanodeUuid(), null,
AdminStates.DECOMMISSIONED);
// Step 3, recommission the first DN on SBN and ANN to create excess replica
    // It recommissions the node on the SBN first to create a potentially
    // inconsistent state. In a production cluster, such an inconsistent state
    // can happen even if the recommission command was issued on the ANN first,
    // given the async nature of the system.
// Step 3.a, ask SBN to recomm the first DN.
// SBN has been fixed so that it no longer invalidates excess replica during
// recommission.
// Before the fix, SBN could get into the following state.
// 1. the last DN would have been chosen as excess replica, given its
// heartbeat is considered old.
// Please refer to BlockPlacementPolicyDefault#chooseReplicaToDelete
// 2. After recommissionNode finishes, SBN has 3 live replicas ( 0, 1, 2 )
// and one excess replica ( 3 )
// After the fix,
// After recommissionNode finishes, SBN has 4 live replicas ( 0, 1, 2, 3 )
Thread.sleep(slowHeartbeatDNwaitTime);
recommissionNode(1, decomNodeFromSBN);
// Step 3.b, ask ANN to recommission the first DN.
// To verify the fix, the test makes sure the excess replica picked by ANN
// is different from the one picked by SBN before the fix.
// To achieve that, we make sure next-to-last DN is chosen as excess replica
// by ANN.
// 1. restore LastDNprop's heartbeat interval.
// 2. Make next-to-last DN's heartbeat slow.
MiniDFSCluster.DataNodeProperties LastDNprop = cluster.stopDataNode(3);
LastDNprop.conf.setLong(
DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
cluster.restartDataNode(LastDNprop);
MiniDFSCluster.DataNodeProperties nextToLastDNprop = cluster.stopDataNode(2);
nextToLastDNprop.conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 30);
cluster.restartDataNode(nextToLastDNprop);
cluster.waitActive();
Thread.sleep(slowHeartbeatDNwaitTime);
recommissionNode(0, decommissionedNodeFromANN);
// Step 3.c, make sure the DN has deleted the block and report to NNs
cluster.triggerHeartbeats();
HATestUtil.waitForDNDeletions(cluster);
cluster.triggerDeletionReports();
// Step 4, decommission the first DN on both ANN and SBN
// With the fix to make sure SBN no longer marks excess replica
// during recommission, SBN's decommission can finish properly
decommissionNode(0, firstDN.getDatanodeUuid(), null,
AdminStates.DECOMMISSIONED);
// Ask SBN to decomm the first DN
decommissionNode(1, firstDN.getDatanodeUuid(), null,
AdminStates.DECOMMISSIONED);
cluster.shutdown();
}
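  /*
   * For each namenode, repeatedly write a file, decommission one datanode,
   * verify that its blocks get re-replicated and the decommissioned node
   * stays alive, then restart the cluster to make sure decommissioned
   * datanodes can still register.
   */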
private void testDecommission(int numNamenodes, int numDatanodes)
throws IOException {
LOG.info("Starting test testDecommission");
startCluster(numNamenodes, numDatanodes, conf);
ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
new ArrayList<ArrayList<DatanodeInfo>>(numNamenodes);
for(int i = 0; i < numNamenodes; i++) {
namenodeDecomList.add(i, new ArrayList<DatanodeInfo>(numDatanodes));
}
Path file1 = new Path("testDecommission.dat");
for (int iteration = 0; iteration < numDatanodes - 1; iteration++) {
int replicas = numDatanodes - iteration - 1;
// Start decommissioning one namenode at a time
for (int i = 0; i < numNamenodes; i++) {
ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(i);
FileSystem fileSys = cluster.getFileSystem(i);
FSNamesystem ns = cluster.getNamesystem(i);
writeFile(fileSys, file1, replicas);
int deadDecomissioned = ns.getNumDecomDeadDataNodes();
int liveDecomissioned = ns.getNumDecomLiveDataNodes();
// Decommission one node. Verify that node is decommissioned.
DatanodeInfo decomNode = decommissionNode(i, null, decommissionedNodes,
AdminStates.DECOMMISSIONED);
decommissionedNodes.add(decomNode);
assertEquals(deadDecomissioned, ns.getNumDecomDeadDataNodes());
assertEquals(liveDecomissioned + 1, ns.getNumDecomLiveDataNodes());
// Ensure decommissioned datanode is not automatically shutdown
DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
assertEquals("All datanodes must be alive", numDatanodes,
client.datanodeReport(DatanodeReportType.LIVE).length);
// wait for the block to be replicated
int tries = 0;
while (tries++ < 20) {
try {
Thread.sleep(1000);
if (checkFile(fileSys, file1, replicas, decomNode.getXferAddr(),
numDatanodes) == null) {
break;
}
} catch (InterruptedException ie) {
}
}
assertTrue("Checked if block was replicated after decommission, tried "
+ tries + " times.", tries < 20);
cleanupFile(fileSys, file1);
}
}
// Restart the cluster and ensure decommissioned datanodes
// are allowed to register with the namenode
cluster.shutdown();
startCluster(numNamenodes, numDatanodes, conf);
cluster.shutdown();
}
/**
* Test that over-replicated blocks are deleted on recommission.
*/
@Test(timeout=120000)
public void testRecommission() throws Exception {
final int numDatanodes = 6;
try {
LOG.info("Starting test testRecommission");
startCluster(1, numDatanodes, conf);
final Path file1 = new Path("testDecommission.dat");
final int replicas = numDatanodes - 1;
ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
final FileSystem fileSys = cluster.getFileSystem();
// Write a file to n-1 datanodes
writeFile(fileSys, file1, replicas);
// Decommission one of the datanodes with a replica
BlockLocation loc = fileSys.getFileBlockLocations(file1, 0, 1)[0];
assertEquals("Unexpected number of replicas from getFileBlockLocations",
replicas, loc.getHosts().length);
final String toDecomHost = loc.getNames()[0];
String toDecomUuid = null;
for (DataNode d : cluster.getDataNodes()) {
if (d.getDatanodeId().getXferAddr().equals(toDecomHost)) {
toDecomUuid = d.getDatanodeId().getDatanodeUuid();
break;
}
}
assertNotNull("Could not find a dn with the block!", toDecomUuid);
final DatanodeInfo decomNode =
decommissionNode(0, toDecomUuid, decommissionedNodes,
AdminStates.DECOMMISSIONED);
decommissionedNodes.add(decomNode);
final BlockManager blockManager =
cluster.getNamesystem().getBlockManager();
final DatanodeManager datanodeManager =
blockManager.getDatanodeManager();
BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
// Ensure decommissioned datanode is not automatically shutdown
DFSClient client = getDfsClient(cluster.getNameNode(), conf);
assertEquals("All datanodes must be alive", numDatanodes,
client.datanodeReport(DatanodeReportType.LIVE).length);
// wait for the block to be replicated
final ExtendedBlock b = DFSTestUtil.getFirstBlock(fileSys, file1);
final String uuid = toDecomUuid;
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
BlockInfo info =
blockManager.getStoredBlock(b.getLocalBlock());
int count = 0;
StringBuilder sb = new StringBuilder("Replica locations: ");
for (int i = 0; i < info.numNodes(); i++) {
DatanodeDescriptor dn = info.getDatanode(i);
sb.append(dn + ", ");
if (!dn.getDatanodeUuid().equals(uuid)) {
count++;
}
}
LOG.info(sb.toString());
LOG.info("Count: " + count);
return count == replicas;
}
}, 500, 30000);
// redecommission and wait for over-replication to be fixed
recommissionNode(0, decomNode);
BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
DFSTestUtil.waitForReplication(cluster, b, 1, replicas, 0);
cleanupFile(fileSys, file1);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Tests cluster storage statistics during decommissioning for non
* federated cluster
*/
@Test(timeout=360000)
public void testClusterStats() throws Exception {
testClusterStats(1);
}
/**
   * Tests cluster storage statistics during decommissioning for a
   * federated cluster.
*/
@Test(timeout=360000)
public void testClusterStatsFederation() throws Exception {
testClusterStats(3);
}
public void testClusterStats(int numNameNodes) throws IOException,
InterruptedException {
LOG.info("Starting test testClusterStats");
int numDatanodes = 1;
startCluster(numNameNodes, numDatanodes, conf);
for (int i = 0; i < numNameNodes; i++) {
FileSystem fileSys = cluster.getFileSystem(i);
Path file = new Path("testClusterStats.dat");
writeFile(fileSys, file, 1);
FSNamesystem fsn = cluster.getNamesystem(i);
NameNode namenode = cluster.getNameNode(i);
DatanodeInfo decomInfo = decommissionNode(i, null, null,
AdminStates.DECOMMISSION_INPROGRESS);
DataNode decomNode = getDataNode(decomInfo);
// Check namenode stats for multiple datanode heartbeats
verifyStats(namenode, fsn, decomInfo, decomNode, true);
// Stop decommissioning and verify stats
writeConfigFile(excludeFile, null);
refreshNodes(fsn, conf);
DatanodeInfo retInfo = NameNodeAdapter.getDatanode(fsn, decomInfo);
DataNode retNode = getDataNode(decomInfo);
waitNodeState(retInfo, AdminStates.NORMAL);
verifyStats(namenode, fsn, retInfo, retNode, false);
}
}
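  /* Find the DataNode instance in the cluster matching the given DatanodeInfo. */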
private DataNode getDataNode(DatanodeInfo decomInfo) {
DataNode decomNode = null;
for (DataNode dn: cluster.getDataNodes()) {
if (decomInfo.equals(dn.getDatanodeId())) {
decomNode = dn;
break;
}
}
assertNotNull("Could not find decomNode in cluster!", decomNode);
return decomNode;
}
/**
* Test host/include file functionality. Only datanodes
   * in the include file are allowed to connect to the namenode in a
   * non-federated cluster.
*/
@Test(timeout=360000)
public void testHostsFile() throws IOException, InterruptedException {
// Test for a single namenode cluster
testHostsFile(1);
}
/**
* Test host/include file functionality. Only datanodes
* in the include file are allowed to connect to the namenode in a
* federated cluster.
*/
@Test(timeout=360000)
public void testHostsFileFederation() throws IOException, InterruptedException {
// Test for 3 namenode federated cluster
testHostsFile(3);
}
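  /*
   * Start a cluster with a hosts file, then replace the hosts file with a
   * single bogus entry and verify that the real datanode is reported dead
   * along with the bogus entry.
   */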
public void testHostsFile(int numNameNodes) throws IOException,
InterruptedException {
int numDatanodes = 1;
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(numNameNodes))
.numDataNodes(numDatanodes).setupHostsFile(true).build();
cluster.waitActive();
    // Now write a hosts file that contains only a bogus entry, so the real
    // datanode is disallowed from talking to the namenode, resulting in its
    // shutdown.
ArrayList<String>list = new ArrayList<String>();
final String bogusIp = "127.0.30.1";
list.add(bogusIp);
writeConfigFile(hostsFile, list);
for (int j = 0; j < numNameNodes; j++) {
refreshNodes(cluster.getNamesystem(j), conf);
DFSClient client = getDfsClient(cluster.getNameNode(j), conf);
DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
for (int i = 0 ; i < 5 && info.length != 0; i++) {
LOG.info("Waiting for datanode to be marked dead");
Thread.sleep(HEARTBEAT_INTERVAL * 1000);
info = client.datanodeReport(DatanodeReportType.LIVE);
}
assertEquals("Number of live nodes should be 0", 0, info.length);
// Test that non-live and bogus hostnames are considered "dead".
// The dead report should have an entry for (1) the DN that is
// now considered dead because it is no longer allowed to connect
// and (2) the bogus entry in the hosts file (these entries are
// always added last)
info = client.datanodeReport(DatanodeReportType.DEAD);
assertEquals("There should be 2 dead nodes", 2, info.length);
DatanodeID id = cluster.getDataNodes().get(0).getDatanodeId();
assertEquals(id.getHostName(), info[0].getHostName());
assertEquals(bogusIp, info[1].getHostName());
}
}
@Test(timeout=120000)
public void testDecommissionWithOpenfile() throws IOException, InterruptedException {
LOG.info("Starting test testDecommissionWithOpenfile");
//At most 4 nodes will be decommissioned
startCluster(1, 7, conf);
FileSystem fileSys = cluster.getFileSystem(0);
FSNamesystem ns = cluster.getNamesystem(0);
String openFile = "/testDecommissionWithOpenfile.dat";
writeFile(fileSys, new Path(openFile), (short)3);
    // re-open the file for append so that it is under construction during decommission
FSDataOutputStream fdos = fileSys.append(new Path(openFile));
LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(cluster.getNameNode(0), openFile, 0, fileSize);
DatanodeInfo[] dnInfos4LastBlock = lbs.getLastLocatedBlock().getLocations();
DatanodeInfo[] dnInfos4FirstBlock = lbs.get(0).getLocations();
ArrayList<String> nodes = new ArrayList<String>();
ArrayList<DatanodeInfo> dnInfos = new ArrayList<DatanodeInfo>();
DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
for (DatanodeInfo datanodeInfo : dnInfos4FirstBlock) {
DatanodeInfo found = datanodeInfo;
for (DatanodeInfo dif: dnInfos4LastBlock) {
if (datanodeInfo.equals(dif)) {
found = null;
}
}
if (found != null) {
nodes.add(found.getXferAddr());
dnInfos.add(dm.getDatanode(found));
}
}
//decommission one of the 3 nodes which have last block
nodes.add(dnInfos4LastBlock[0].getXferAddr());
dnInfos.add(dm.getDatanode(dnInfos4LastBlock[0]));
writeConfigFile(excludeFile, nodes);
refreshNodes(ns, conf);
for (DatanodeInfo dn : dnInfos) {
waitNodeState(dn, AdminStates.DECOMMISSIONED);
}
fdos.close();
}
/**
* Tests restart of namenode while datanode hosts are added to exclude file
**/
@Test(timeout=360000)
  public void testDecommissionWithNamenodeRestart() throws IOException, InterruptedException {
LOG.info("Starting test testDecommissionWithNamenodeRestart");
int numNamenodes = 1;
int numDatanodes = 1;
int replicas = 1;
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 5);
startCluster(numNamenodes, numDatanodes, conf);
Path file1 = new Path("testDecommissionWithNamenodeRestart.dat");
FileSystem fileSys = cluster.getFileSystem();
writeFile(fileSys, file1, replicas);
DFSClient client = getDfsClient(cluster.getNameNode(), conf);
DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
DatanodeID excludedDatanodeID = info[0];
String excludedDatanodeName = info[0].getXferAddr();
writeConfigFile(excludeFile, new ArrayList<String>(Arrays.asList(excludedDatanodeName)));
//Add a new datanode to cluster
cluster.startDataNodes(conf, 1, true, null, null, null, null);
numDatanodes+=1;
assertEquals("Number of datanodes should be 2 ", 2, cluster.getDataNodes().size());
//Restart the namenode
cluster.restartNameNode();
DatanodeInfo datanodeInfo = NameNodeAdapter.getDatanode(
cluster.getNamesystem(), excludedDatanodeID);
waitNodeState(datanodeInfo, AdminStates.DECOMMISSIONED);
// Ensure decommissioned datanode is not automatically shutdown
assertEquals("All datanodes must be alive", numDatanodes,
client.datanodeReport(DatanodeReportType.LIVE).length);
assertTrue("Checked if block was replicated after decommission.",
checkFile(fileSys, file1, replicas, datanodeInfo.getXferAddr(),
numDatanodes) == null);
cleanupFile(fileSys, file1);
// Restart the cluster and ensure recommissioned datanodes
// are allowed to register with the namenode
cluster.shutdown();
startCluster(numNamenodes, numDatanodes, conf);
cluster.shutdown();
}
/**
* Test using a "registration name" in a host include file.
*
* Registration names are DataNode names specified in the configuration by
* dfs.datanode.hostname. The DataNode will send this name to the NameNode
* as part of its registration. Registration names are helpful when you
* want to override the normal first result of DNS resolution on the
* NameNode. For example, a given datanode IP may map to two hostnames,
* and you may want to choose which hostname is used internally in the
* cluster.
*
* It is not recommended to use a registration name which is not also a
* valid DNS hostname for the DataNode. See HDFS-5237 for background.
*/
@Ignore
@Test(timeout=360000)
public void testIncludeByRegistrationName() throws Exception {
Configuration hdfsConf = new Configuration(conf);
// Any IPv4 address starting with 127 functions as a "loopback" address
// which is connected to the current host. So by choosing 127.0.0.100
// as our registration name, we have chosen a name which is also a valid
// way of reaching the local DataNode we're going to start.
// Typically, a registration name would be a hostname, but we don't want
// to deal with DNS in this test.
final String registrationName = "127.0.0.100";
final String nonExistentDn = "127.0.0.10";
hdfsConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, registrationName);
cluster = new MiniDFSCluster.Builder(hdfsConf)
.numDataNodes(1).checkDataNodeHostConfig(true)
.setupHostsFile(true).build();
cluster.waitActive();
// Set up an includes file that doesn't have our datanode.
ArrayList<String> nodes = new ArrayList<String>();
nodes.add(nonExistentDn);
writeConfigFile(hostsFile, nodes);
refreshNodes(cluster.getNamesystem(0), hdfsConf);
// Wait for the DN to be marked dead.
LOG.info("Waiting for DN to be marked as dead.");
final DFSClient client = getDfsClient(cluster.getNameNode(0), hdfsConf);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
BlockManagerTestUtil
.checkHeartbeat(cluster.getNamesystem().getBlockManager());
try {
DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.DEAD);
return info.length == 1;
} catch (IOException e) {
LOG.warn("Failed to check dead DNs", e);
return false;
}
}
}, 500, 5000);
// Use a non-empty include file with our registration name.
// It should work.
int dnPort = cluster.getDataNodes().get(0).getXferPort();
nodes = new ArrayList<String>();
nodes.add(registrationName + ":" + dnPort);
writeConfigFile(hostsFile, nodes);
refreshNodes(cluster.getNamesystem(0), hdfsConf);
cluster.restartDataNode(0);
cluster.triggerHeartbeats();
// Wait for the DN to come back.
LOG.info("Waiting for DN to come back.");
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
BlockManagerTestUtil
.checkHeartbeat(cluster.getNamesystem().getBlockManager());
try {
DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.LIVE);
if (info.length == 1) {
Assert.assertFalse(info[0].isDecommissioned());
Assert.assertFalse(info[0].isDecommissionInProgress());
assertEquals(registrationName, info[0].getHostName());
return true;
}
} catch (IOException e) {
LOG.warn("Failed to check dead DNs", e);
}
return false;
}
}, 500, 5000);
}
@Test(timeout=120000)
public void testBlocksPerInterval() throws Exception {
Configuration newConf = new Configuration(conf);
org.apache.log4j.Logger.getLogger(DecommissionManager.class)
.setLevel(Level.TRACE);
// Turn the blocks per interval way down
newConf.setInt(
DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
3);
// Disable the normal monitor runs
newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
Integer.MAX_VALUE);
startCluster(1, 3, newConf);
final FileSystem fs = cluster.getFileSystem();
final DatanodeManager datanodeManager =
cluster.getNamesystem().getBlockManager().getDatanodeManager();
final DecommissionManager decomManager = datanodeManager.getDecomManager();
// Write a 3 block file, so each node has one block. Should scan 3 nodes.
DFSTestUtil.createFile(fs, new Path("/file1"), 64, (short) 3, 0xBAD1DEA);
doDecomCheck(datanodeManager, decomManager, 3);
// Write another file, should only scan two
DFSTestUtil.createFile(fs, new Path("/file2"), 64, (short)3, 0xBAD1DEA);
doDecomCheck(datanodeManager, decomManager, 2);
// One more file, should only scan 1
DFSTestUtil.createFile(fs, new Path("/file3"), 64, (short)3, 0xBAD1DEA);
doDecomCheck(datanodeManager, decomManager, 1);
    // blocks on each DN now exceed the limit; still scan at least one node
DFSTestUtil.createFile(fs, new Path("/file4"), 64, (short)3, 0xBAD1DEA);
doDecomCheck(datanodeManager, decomManager, 1);
}
@Deprecated
@Test(timeout=120000)
public void testNodesPerInterval() throws Exception {
Configuration newConf = new Configuration(conf);
org.apache.log4j.Logger.getLogger(DecommissionManager.class)
.setLevel(Level.TRACE);
// Set the deprecated configuration key which limits the # of nodes per
// interval
newConf.setInt("dfs.namenode.decommission.nodes.per.interval", 1);
// Disable the normal monitor runs
newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
Integer.MAX_VALUE);
startCluster(1, 3, newConf);
final FileSystem fs = cluster.getFileSystem();
final DatanodeManager datanodeManager =
cluster.getNamesystem().getBlockManager().getDatanodeManager();
final DecommissionManager decomManager = datanodeManager.getDecomManager();
// Write a 3 block file, so each node has one block. Should scan 1 node
// each time.
DFSTestUtil.createFile(fs, new Path("/file1"), 64, (short) 3, 0xBAD1DEA);
for (int i=0; i<3; i++) {
doDecomCheck(datanodeManager, decomManager, 1);
}
}
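  /*
   * Decommission every datanode, run a single decommission monitor scan and
   * verify how many nodes were checked, then recommission all of them.
   */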
private void doDecomCheck(DatanodeManager datanodeManager,
DecommissionManager decomManager, int expectedNumCheckedNodes)
throws IOException, ExecutionException, InterruptedException {
// Decom all nodes
ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
for (DataNode d: cluster.getDataNodes()) {
DatanodeInfo dn = decommissionNode(0, d.getDatanodeUuid(),
decommissionedNodes,
AdminStates.DECOMMISSION_INPROGRESS);
decommissionedNodes.add(dn);
}
// Run decom scan and check
BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
assertEquals("Unexpected # of nodes checked", expectedNumCheckedNodes,
decomManager.getNumNodesChecked());
// Recommission all nodes
for (DatanodeInfo dn : decommissionedNodes) {
recommissionNode(0, dn);
}
}
@Test(timeout=120000)
public void testPendingNodes() throws Exception {
Configuration newConf = new Configuration(conf);
org.apache.log4j.Logger.getLogger(DecommissionManager.class)
.setLevel(Level.TRACE);
// Only allow one node to be decom'd at a time
newConf.setInt(
DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
1);
// Disable the normal monitor runs
newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
Integer.MAX_VALUE);
startCluster(1, 3, newConf);
final FileSystem fs = cluster.getFileSystem();
final DatanodeManager datanodeManager =
cluster.getNamesystem().getBlockManager().getDatanodeManager();
final DecommissionManager decomManager = datanodeManager.getDecomManager();
// Keep a file open to prevent decom from progressing
HdfsDataOutputStream open1 =
(HdfsDataOutputStream) fs.create(new Path("/openFile1"), (short)3);
// Flush and trigger block reports so the block definitely shows up on NN
open1.write(123);
open1.hflush();
for (DataNode d: cluster.getDataNodes()) {
DataNodeTestUtils.triggerBlockReport(d);
}
// Decom two nodes, so one is still alive
ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
for (int i=0; i<2; i++) {
final DataNode d = cluster.getDataNodes().get(i);
DatanodeInfo dn = decommissionNode(0, d.getDatanodeUuid(),
decommissionedNodes,
AdminStates.DECOMMISSION_INPROGRESS);
decommissionedNodes.add(dn);
}
for (int i=2; i>=0; i--) {
assertTrackedAndPending(decomManager, 0, i);
BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
}
// Close file, try to decom the last node, should get stuck in tracked
open1.close();
final DataNode d = cluster.getDataNodes().get(2);
DatanodeInfo dn = decommissionNode(0, d.getDatanodeUuid(),
decommissionedNodes,
AdminStates.DECOMMISSION_INPROGRESS);
decommissionedNodes.add(dn);
BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
assertTrackedAndPending(decomManager, 1, 0);
}
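  /* Assert the number of nodes currently tracked and pending in the
   * DecommissionManager. */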
private void assertTrackedAndPending(DecommissionManager decomManager,
int tracked, int pending) {
assertEquals("Unexpected number of tracked nodes", tracked,
decomManager.getNumTrackedNodes());
assertEquals("Unexpected number of pending nodes", pending,
decomManager.getNumPendingNodes());
}
/**
* Decommissioned node should not be considered while calculating node usage
* @throws InterruptedException
*/
@Test
public void testNodeUsageAfterDecommissioned()
throws IOException, InterruptedException {
nodeUsageVerification(2, new long[] { 26384L, 26384L },
AdminStates.DECOMMISSIONED);
}
/**
* DECOMMISSION_INPROGRESS node should not be considered
* while calculating node usage
* @throws InterruptedException
*/
@Test
  public void testNodeUsageWhileDecommissioning()
throws IOException, InterruptedException {
nodeUsageVerification(1, new long[] { 26384L },
AdminStates.DECOMMISSION_INPROGRESS);
}
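  /*
   * Start a cluster with simulated datanode capacities, write a single-replica
   * file and verify that the minimum node usage reported by the namenode
   * excludes decommissioned and decommissioning nodes, and that a
   * recommissioned node is counted again.
   */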
@SuppressWarnings({ "unchecked" })
public void nodeUsageVerification(int numDatanodes, long[] nodesCapacity,
AdminStates decommissionState) throws IOException, InterruptedException {
Map<String, Map<String, String>> usage = null;
DatanodeInfo decommissionedNodeInfo = null;
String zeroNodeUsage = "0.00%";
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
FileSystem fileSys = null;
Path file1 = new Path("testNodeUsage.dat");
try {
SimulatedFSDataset.setFactory(conf);
cluster =
new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1))
.numDataNodes(numDatanodes)
.simulatedCapacities(nodesCapacity).build();
cluster.waitActive();
DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
validateCluster(client, numDatanodes);
ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
new ArrayList<ArrayList<DatanodeInfo>>(1);
namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(numDatanodes));
if (decommissionState == AdminStates.DECOMMISSIONED) {
// Move datanode1 to Decommissioned state
ArrayList<DatanodeInfo> decommissionedNode = namenodeDecomList.get(0);
decommissionedNodeInfo = decommissionNode(0, null,
decommissionedNode, decommissionState);
}
      // Write a file (replication 1). Hence it will be written to only one live node.
fileSys = cluster.getFileSystem(0);
FSNamesystem ns = cluster.getNamesystem(0);
writeFile(fileSys, file1, 1);
Thread.sleep(2000);
// min NodeUsage should not be 0.00%
usage = (Map<String, Map<String, String>>) JSON.parse(ns.getNodeUsage());
String minUsageBeforeDecom = usage.get("nodeUsage").get("min");
assertTrue(!minUsageBeforeDecom.equalsIgnoreCase(zeroNodeUsage));
if (decommissionState == AdminStates.DECOMMISSION_INPROGRESS) {
// Start decommissioning datanode
ArrayList<DatanodeInfo> decommissioningNodes = namenodeDecomList.
get(0);
decommissionedNodeInfo = decommissionNode(0, null,
decommissioningNodes, decommissionState);
// NodeUsage should not include DECOMMISSION_INPROGRESS node
// (minUsage should be 0.00%)
usage = (Map<String, Map<String, String>>)
JSON.parse(ns.getNodeUsage());
assertTrue(usage.get("nodeUsage").get("min").
equalsIgnoreCase(zeroNodeUsage));
}
// Recommission node
recommissionNode(0, decommissionedNodeInfo);
usage = (Map<String, Map<String, String>>) JSON.parse(ns.getNodeUsage());
      String nodeUsageAfterRecommission =
          decommissionState == AdminStates.DECOMMISSION_INPROGRESS
              ? minUsageBeforeDecom
              : zeroNodeUsage;
      assertTrue(usage.get("nodeUsage").get("min").
          equalsIgnoreCase(nodeUsageAfterRecommission));
} finally {
cleanupFile(fileSys, file1);
cluster.shutdown();
}
}
}
| 50,252 | 39.042231 | 105 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Level;
import org.junit.Test;
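/**
 * Tests that an open file whose parent directory was deleted does not
 * reappear when persisted leases are replayed across namenode restarts,
 * while an unrelated open file survives.
 */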
public class TestFileCreationDelete {
{
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}
@Test
public void testFileCreationDeleteParent() throws IOException {
Configuration conf = new HdfsConfiguration();
final int MAX_IDLE_TIME = 2000; // 2s
conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
// create cluster
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = null;
try {
cluster.waitActive();
fs = cluster.getFileSystem();
final int nnport = cluster.getNameNodePort();
// create file1.
Path dir = new Path("/foo");
Path file1 = new Path(dir, "file1");
FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
System.out.println("testFileCreationDeleteParent: "
+ "Created file " + file1);
TestFileCreation.writeFile(stm1, 1000);
stm1.hflush();
// create file2.
Path file2 = new Path("/file2");
FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
System.out.println("testFileCreationDeleteParent: "
+ "Created file " + file2);
TestFileCreation.writeFile(stm2, 1000);
stm2.hflush();
// rm dir
fs.delete(dir, true);
// restart cluster with the same namenode port as before.
// This ensures that leases are persisted in fsimage.
cluster.shutdown();
try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
.format(false)
.build();
cluster.waitActive();
// restart cluster yet again. This triggers the code to read in
// persistent leases from fsimage.
cluster.shutdown();
try {Thread.sleep(5000);} catch (InterruptedException e) {}
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
.format(false)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
assertTrue(!fs.exists(file1));
assertTrue(fs.exists(file2));
} finally {
fs.close();
cluster.shutdown();
}
}
}
| 3,592 | 35.663265 | 81 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.util.ThreadUtil;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
* These tests make sure that DFSClient excludes writing data to
* a DN properly in case of errors.
*/
public class TestDFSClientExcludedNodes {
private MiniDFSCluster cluster;
private Configuration conf;
@Before
public void setUp() {
cluster = null;
conf = new HdfsConfiguration();
}
@After
public void tearDown() {
if (cluster != null) {
cluster.shutdown();
}
}
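  /*
   * With one of the three datanodes stopped before the write, the client
   * should exclude the failed node from the pipeline and still be able to
   * close the file.
   */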
@Test(timeout=60000)
public void testExcludedNodes() throws IOException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = cluster.getFileSystem();
Path filePath = new Path("/testExcludedNodes");
// kill a datanode
cluster.stopDataNode(AppendTestUtil.nextInt(3));
OutputStream out = fs.create(
filePath,
true,
4096,
(short) 3,
fs.getDefaultBlockSize(filePath)
);
out.write(20);
try {
out.close();
} catch (Exception e) {
fail("Single DN failure should not result in a block abort: \n" +
e.getMessage());
}
}
@Test(timeout=60000)
public void testExcludedNodesForgiveness() throws IOException {
// Forgive nodes in under 2.5s for this test case.
conf.setLong(
HdfsClientConfigKeys.Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_KEY,
2500);
// We'll be using a 512 bytes block size just for tests
// so making sure the checksum bytes too match it.
conf.setInt("io.bytes.per.checksum", 512);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = cluster.getFileSystem();
Path filePath = new Path("/testForgivingExcludedNodes");
// 256 bytes data chunk for writes
byte[] bytes = new byte[256];
for (int index=0; index<bytes.length; index++) {
bytes[index] = '0';
}
// File with a 512 bytes block size
FSDataOutputStream out = fs.create(filePath, true, 4096, (short) 3, 512);
// Write a block to all 3 DNs (2x256bytes).
out.write(bytes);
out.write(bytes);
out.hflush();
// Remove two DNs, to put them into the exclude list.
DataNodeProperties two = cluster.stopDataNode(2);
DataNodeProperties one = cluster.stopDataNode(1);
// Write another block.
// At this point, we have two nodes already in excluded list.
out.write(bytes);
out.write(bytes);
out.hflush();
    // Bring back the older DNs, since they are going to be forgiven only
    // after this previous block write.
Assert.assertEquals(true, cluster.restartDataNode(one, true));
Assert.assertEquals(true, cluster.restartDataNode(two, true));
cluster.waitActive();
// Sleep for 5s, to let the excluded nodes be expired
// from the excludes list (i.e. forgiven after the configured wait period).
    // [Sleeping just in case the restart of the DNs completed in < 5s, because
    // otherwise we'll end up quickly excluding those again.]
ThreadUtil.sleepAtLeastIgnoreInterrupts(5000);
// Terminate the last good DN, to assert that there's no
// single-DN-available scenario, caused by not forgiving the other
// two by now.
cluster.stopDataNode(0);
try {
// Attempt writing another block, which should still pass
// cause the previous two should have been forgiven by now,
// while the last good DN added to excludes this time.
out.write(bytes);
out.hflush();
out.close();
} catch (Exception e) {
fail("Excluded DataNodes should be forgiven after a while and " +
"not cause file writing exception of: '" + e.getMessage() + "'");
}
}
}
| 4,986 | 31.809211 | 79 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Random;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.test.MockitoUtil;
import org.apache.hadoop.util.Time;
import org.junit.Test;
import org.mockito.Mockito;
/**
* This class tests the access time on files.
*
*/
public class TestSetTimes {
static final long seed = 0xDEADBEEFL;
static final int blockSize = 8192;
static final int fileSize = 16384;
static final int numDatanodes = 1;
static final SimpleDateFormat dateForm = new SimpleDateFormat("yyyy-MM-dd HH:mm");
Random myrand = new Random();
Path hostsFile;
Path excludeFile;
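  /* Create a file with the given replication and write fileSize random bytes,
   * leaving the stream open so the caller decides when to close it. */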
private FSDataOutputStream writeFile(FileSystem fileSys, Path name, int repl)
throws IOException {
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) repl, blockSize);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
return stm;
}
private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
assertTrue(fileSys.exists(name));
fileSys.delete(name, true);
assertTrue(!fileSys.exists(name));
}
private void printDatanodeReport(DatanodeInfo[] info) {
System.out.println("-------------------------------------------------");
for (int i = 0; i < info.length; i++) {
System.out.println(info[i].getDatanodeReport());
System.out.println();
}
}
/**
* Tests mod & access time in DFS.
*/
@Test
public void testTimes() throws IOException {
Configuration conf = new HdfsConfiguration();
final int MAX_IDLE_TIME = 2000; // 2s
conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDatanodes)
.build();
cluster.waitActive();
final int nnport = cluster.getNameNodePort();
InetSocketAddress addr = new InetSocketAddress("localhost",
cluster.getNameNodePort());
DFSClient client = new DFSClient(addr, conf);
DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
assertEquals("Number of Datanodes ", numDatanodes, info.length);
FileSystem fileSys = cluster.getFileSystem();
int replicas = 1;
assertTrue(fileSys instanceof DistributedFileSystem);
try {
//
// create file and record atime/mtime
//
System.out.println("Creating testdir1 and testdir1/test1.dat.");
Path dir1 = new Path("testdir1");
Path file1 = new Path(dir1, "test1.dat");
FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
FileStatus stat = fileSys.getFileStatus(file1);
long atimeBeforeClose = stat.getAccessTime();
String adate = dateForm.format(new Date(atimeBeforeClose));
System.out.println("atime on " + file1 + " before close is " +
adate + " (" + atimeBeforeClose + ")");
assertTrue(atimeBeforeClose != 0);
stm.close();
stat = fileSys.getFileStatus(file1);
long atime1 = stat.getAccessTime();
long mtime1 = stat.getModificationTime();
adate = dateForm.format(new Date(atime1));
String mdate = dateForm.format(new Date(mtime1));
System.out.println("atime on " + file1 + " is " + adate +
" (" + atime1 + ")");
System.out.println("mtime on " + file1 + " is " + mdate +
" (" + mtime1 + ")");
assertTrue(atime1 != 0);
//
// record dir times
//
stat = fileSys.getFileStatus(dir1);
long mdir1 = stat.getAccessTime();
assertTrue(mdir1 == 0);
// set the access time to be one day in the past
long atime2 = atime1 - (24L * 3600L * 1000L);
fileSys.setTimes(file1, -1, atime2);
// check new access time on file
stat = fileSys.getFileStatus(file1);
long atime3 = stat.getAccessTime();
String adate3 = dateForm.format(new Date(atime3));
System.out.println("new atime on " + file1 + " is " +
adate3 + " (" + atime3 + ")");
assertTrue(atime2 == atime3);
assertTrue(mtime1 == stat.getModificationTime());
// set the modification time to be 1 hour in the past
long mtime2 = mtime1 - (3600L * 1000L);
fileSys.setTimes(file1, mtime2, -1);
// check new modification time on file
stat = fileSys.getFileStatus(file1);
long mtime3 = stat.getModificationTime();
String mdate3 = dateForm.format(new Date(mtime3));
System.out.println("new mtime on " + file1 + " is " +
mdate3 + " (" + mtime3 + ")");
assertTrue(atime2 == stat.getAccessTime());
assertTrue(mtime2 == mtime3);
long mtime4 = Time.now() - (3600L * 1000L);
long atime4 = Time.now();
fileSys.setTimes(dir1, mtime4, atime4);
// check new modification time on file
stat = fileSys.getFileStatus(dir1);
assertTrue("Not matching the modification times", mtime4 == stat
.getModificationTime());
assertTrue("Not matching the access times", atime4 == stat
.getAccessTime());
Path nonExistingDir = new Path(dir1, "/nonExistingDir/");
try {
fileSys.setTimes(nonExistingDir, mtime4, atime4);
fail("Expecting FileNotFoundException");
} catch (FileNotFoundException e) {
assertTrue(e.getMessage().contains(
"File/Directory " + nonExistingDir.toString() + " does not exist."));
}
// shutdown cluster and restart
cluster.shutdown();
try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
.format(false)
.build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
// verify that access times and modification times persist after a
// cluster restart.
System.out.println("Verifying times after cluster restart");
stat = fileSys.getFileStatus(file1);
assertTrue(atime2 == stat.getAccessTime());
assertTrue(mtime3 == stat.getModificationTime());
cleanupFile(fileSys, file1);
cleanupFile(fileSys, dir1);
} catch (IOException e) {
info = client.datanodeReport(DatanodeReportType.ALL);
printDatanodeReport(info);
throw e;
} finally {
fileSys.close();
cluster.shutdown();
}
}
/**
* Tests mod time change at close in DFS.
*/
@Test
public void testTimesAtClose() throws IOException {
Configuration conf = new HdfsConfiguration();
final int MAX_IDLE_TIME = 2000; // 2s
int replicas = 1;
// parameter initialization
conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDatanodes)
.build();
cluster.waitActive();
InetSocketAddress addr = new InetSocketAddress("localhost",
cluster.getNameNodePort());
DFSClient client = new DFSClient(addr, conf);
DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
assertEquals("Number of Datanodes ", numDatanodes, info.length);
FileSystem fileSys = cluster.getFileSystem();
assertTrue(fileSys instanceof DistributedFileSystem);
try {
// create a new file and write to it
Path file1 = new Path("/simple.dat");
FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
System.out.println("Created and wrote file simple.dat");
FileStatus statBeforeClose = fileSys.getFileStatus(file1);
long mtimeBeforeClose = statBeforeClose.getModificationTime();
String mdateBeforeClose = dateForm.format(new Date(
mtimeBeforeClose));
System.out.println("mtime on " + file1 + " before close is "
+ mdateBeforeClose + " (" + mtimeBeforeClose + ")");
assertTrue(mtimeBeforeClose != 0);
//close file after writing
stm.close();
System.out.println("Closed file.");
FileStatus statAfterClose = fileSys.getFileStatus(file1);
long mtimeAfterClose = statAfterClose.getModificationTime();
String mdateAfterClose = dateForm.format(new Date(mtimeAfterClose));
System.out.println("mtime on " + file1 + " after close is "
+ mdateAfterClose + " (" + mtimeAfterClose + ")");
assertTrue(mtimeAfterClose != 0);
assertTrue(mtimeBeforeClose != mtimeAfterClose);
cleanupFile(fileSys, file1);
} catch (IOException e) {
info = client.datanodeReport(DatanodeReportType.ALL);
printDatanodeReport(info);
throw e;
} finally {
fileSys.close();
cluster.shutdown();
}
}
/**
* Test that when access time updates are not needed, the FSNamesystem
* write lock is not taken by getBlockLocations.
* Regression test for HDFS-3981.
*/
@Test(timeout=60000)
public void testGetBlockLocationsOnlyUsesReadLock() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 100*1000);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0)
.build();
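    // spy on the FSNamesystem lock so we can assert that getBlockLocations never takes the write lock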
ReentrantReadWriteLock spyLock = NameNodeAdapter.spyOnFsLock(cluster.getNamesystem());
try {
// Create empty file in the FSN.
Path p = new Path("/empty-file");
DFSTestUtil.createFile(cluster.getFileSystem(), p, 0, (short)1, 0L);
// getBlockLocations() should not need the write lock, since we just created
// the file (and thus its access time is already within the 100-second
// accesstime precision configured above).
MockitoUtil.doThrowWhenCallStackMatches(
new AssertionError("Should not need write lock"),
".*getBlockLocations.*")
.when(spyLock).writeLock();
cluster.getFileSystem().getFileBlockLocations(p, 0, 100);
} finally {
cluster.shutdown();
}
}
public static void main(String[] args) throws Exception {
new TestSetTimes().testTimes();
}
}
| 12,460 | 38.433544 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Method;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.SocketTimeoutException;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import java.util.Random;
import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.BlockStorageLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.VolumeId;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.web.HftpFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.InOrder;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.common.base.Supplier;
import com.google.common.collect.Lists;
public class TestDistributedFileSystem {
private static final Random RAN = new Random();
{
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
}
private boolean dualPortTesting = false;
private boolean noXmlDefaults = false;
private HdfsConfiguration getTestConfiguration() {
HdfsConfiguration conf;
if (noXmlDefaults) {
conf = new HdfsConfiguration(false);
String namenodeDir = new File(MiniDFSCluster.getBaseDirectory(), "name").getAbsolutePath();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeDir);
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeDir);
} else {
conf = new HdfsConfiguration();
}
if (dualPortTesting) {
conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
"localhost:0");
}
conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
return conf;
}
@Test
public void testEmptyDelegationToken() throws IOException {
Configuration conf = getTestConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
FileSystem fileSys = cluster.getFileSystem();
fileSys.getDelegationToken("");
} finally {
cluster.shutdown();
}
}
@Test
public void testFileSystemCloseAll() throws Exception {
Configuration conf = getTestConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
URI address = FileSystem.getDefaultUri(conf);
try {
FileSystem.closeAll();
conf = getTestConfiguration();
FileSystem.setDefaultUri(conf, address);
FileSystem.get(conf);
FileSystem.get(conf);
FileSystem.closeAll();
}
finally {
if (cluster != null) {cluster.shutdown();}
}
}
/**
   * Tests that DFSClient.close throws no ConcurrentModificationException if
   * multiple files are open.
* Also tests that any cached sockets are closed. (HDFS-3359)
*/
@Test
public void testDFSClose() throws Exception {
Configuration conf = getTestConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fileSys = cluster.getFileSystem();
// create two files, leaving them open
fileSys.create(new Path("/test/dfsclose/file-0"));
fileSys.create(new Path("/test/dfsclose/file-1"));
// create another file, close it, and read it, so
// the client gets a socket in its SocketCache
Path p = new Path("/non-empty-file");
DFSTestUtil.createFile(fileSys, p, 1L, (short)1, 0L);
DFSTestUtil.readFile(fileSys, p);
fileSys.close();
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
@Test
public void testDFSCloseOrdering() throws Exception {
DistributedFileSystem fs = new MyDistributedFileSystem();
Path path = new Path("/a");
fs.deleteOnExit(path);
fs.close();
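    // close() is expected to first close open output streams, then delete the
    // deleteOnExit path, and only then close the underlying DFSClient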
InOrder inOrder = inOrder(fs.dfs);
inOrder.verify(fs.dfs).closeOutputStreams(eq(false));
inOrder.verify(fs.dfs).delete(eq(path.toString()), eq(true));
inOrder.verify(fs.dfs).close();
}
private static class MyDistributedFileSystem extends DistributedFileSystem {
MyDistributedFileSystem() {
statistics = new FileSystem.Statistics("myhdfs"); // can't mock finals
dfs = mock(DFSClient.class);
}
@Override
public boolean exists(Path p) {
return true; // trick out deleteOnExit
}
}
@Test
public void testDFSSeekExceptions() throws IOException {
Configuration conf = getTestConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fileSys = cluster.getFileSystem();
String file = "/test/fileclosethenseek/file-0";
Path path = new Path(file);
// create file
FSDataOutputStream output = fileSys.create(path);
output.writeBytes("Some test data to write longer than 10 bytes");
output.close();
FSDataInputStream input = fileSys.open(path);
input.seek(10);
boolean threw = false;
try {
input.seek(100);
} catch (IOException e) {
// success
threw = true;
}
assertTrue("Failed to throw IOE when seeking past end", threw);
input.close();
threw = false;
try {
input.seek(1);
} catch (IOException e) {
//success
threw = true;
}
assertTrue("Failed to throw IOE when seeking after close", threw);
fileSys.close();
}
finally {
if (cluster != null) {cluster.shutdown();}
}
}
@Test
public void testDFSClient() throws Exception {
Configuration conf = getTestConfiguration();
final long grace = 1000L;
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final String filepathstring = "/test/LeaseChecker/foo";
final Path[] filepaths = new Path[4];
for(int i = 0; i < filepaths.length; i++) {
filepaths[i] = new Path(filepathstring + i);
}
final long millis = Time.now();
{
final DistributedFileSystem dfs = cluster.getFileSystem();
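        // setGraceSleepPeriod and isRunning are not public on LeaseRenewer,
        // so reach them via reflection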
Method setMethod = dfs.dfs.getLeaseRenewer().getClass()
.getDeclaredMethod("setGraceSleepPeriod", long.class);
setMethod.setAccessible(true);
setMethod.invoke(dfs.dfs.getLeaseRenewer(), grace);
Method checkMethod = dfs.dfs.getLeaseRenewer().getClass()
.getDeclaredMethod("isRunning");
checkMethod.setAccessible(true);
assertFalse((boolean) checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
{
//create a file
final FSDataOutputStream out = dfs.create(filepaths[0]);
assertTrue((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
//write something
out.writeLong(millis);
assertTrue((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
//close
out.close();
Thread.sleep(grace/4*3);
//within grace period
assertTrue((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
for(int i = 0; i < 3; i++) {
if ((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer())) {
Thread.sleep(grace/2);
}
}
//passed grace period
assertFalse((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
}
{
//create file1
final FSDataOutputStream out1 = dfs.create(filepaths[1]);
assertTrue((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
//create file2
final FSDataOutputStream out2 = dfs.create(filepaths[2]);
assertTrue((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
//write something to file1
out1.writeLong(millis);
assertTrue((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
//close file1
out1.close();
assertTrue((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
//write something to file2
out2.writeLong(millis);
assertTrue((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
//close file2
out2.close();
Thread.sleep(grace/4*3);
//within grace period
assertTrue((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
}
{
//create file3
final FSDataOutputStream out3 = dfs.create(filepaths[3]);
assertTrue((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
Thread.sleep(grace/4*3);
          //passed previous grace period, should still be running
assertTrue((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
//write something to file3
out3.writeLong(millis);
assertTrue((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
//close file3
out3.close();
assertTrue((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
Thread.sleep(grace/4*3);
//within grace period
assertTrue((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
for(int i = 0; i < 3; i++) {
if ((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer())) {
Thread.sleep(grace/2);
}
}
//passed grace period
assertFalse((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
}
dfs.close();
}
{
// Check to see if opening a non-existent file triggers a FNF
FileSystem fs = cluster.getFileSystem();
Path dir = new Path("/wrwelkj");
assertFalse("File should not exist for test.", fs.exists(dir));
try {
FSDataInputStream in = fs.open(dir);
try {
in.close();
fs.close();
} finally {
assertTrue("Did not get a FileNotFoundException for non-existing" +
" file.", false);
}
} catch (FileNotFoundException fnf) {
// This is the proper exception to catch; move on.
}
}
{
final DistributedFileSystem dfs = cluster.getFileSystem();
Method checkMethod = dfs.dfs.getLeaseRenewer().getClass()
.getDeclaredMethod("isRunning");
checkMethod.setAccessible(true);
assertFalse((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
//open and check the file
FSDataInputStream in = dfs.open(filepaths[0]);
assertFalse((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
assertEquals(millis, in.readLong());
assertFalse((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
in.close();
assertFalse((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
dfs.close();
}
{ // test accessing DFS with ip address. should work with any hostname
// alias or ip address that points to the interface that NameNode
// is listening on. In this case, it is localhost.
String uri = "hdfs://127.0.0.1:" + cluster.getNameNodePort() +
"/test/ipAddress/file";
Path path = new Path(uri);
FileSystem fs = FileSystem.get(path.toUri(), conf);
FSDataOutputStream out = fs.create(path);
byte[] buf = new byte[1024];
out.write(buf);
out.close();
FSDataInputStream in = fs.open(path);
in.readFully(buf);
in.close();
fs.close();
}
}
finally {
if (cluster != null) {cluster.shutdown();}
}
}
@Test
public void testStatistics() throws Exception {
int lsLimit = 2;
final Configuration conf = getTestConfiguration();
conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, lsLimit);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
final FileSystem fs = cluster.getFileSystem();
Path dir = new Path("/test");
Path file = new Path(dir, "file");
int readOps = DFSTestUtil.getStatistics(fs).getReadOps();
int writeOps = DFSTestUtil.getStatistics(fs).getWriteOps();
int largeReadOps = DFSTestUtil.getStatistics(fs).getLargeReadOps();
fs.mkdirs(dir);
checkStatistics(fs, readOps, ++writeOps, largeReadOps);
FSDataOutputStream out = fs.create(file, (short)1);
out.close();
checkStatistics(fs, readOps, ++writeOps, largeReadOps);
FileStatus status = fs.getFileStatus(file);
checkStatistics(fs, ++readOps, writeOps, largeReadOps);
fs.getFileBlockLocations(file, 0, 0);
checkStatistics(fs, ++readOps, writeOps, largeReadOps);
fs.getFileBlockLocations(status, 0, 0);
checkStatistics(fs, ++readOps, writeOps, largeReadOps);
FSDataInputStream in = fs.open(file);
in.close();
checkStatistics(fs, ++readOps, writeOps, largeReadOps);
fs.setReplication(file, (short)2);
checkStatistics(fs, readOps, ++writeOps, largeReadOps);
Path file1 = new Path(dir, "file1");
fs.rename(file, file1);
checkStatistics(fs, readOps, ++writeOps, largeReadOps);
fs.getContentSummary(file1);
checkStatistics(fs, ++readOps, writeOps, largeReadOps);
// Iterative ls test
for (int i = 0; i < 10; i++) {
Path p = new Path(dir, Integer.toString(i));
fs.mkdirs(p);
FileStatus[] list = fs.listStatus(dir);
if (list.length > lsLimit) {
          // if this is a large directory, count readOps and largeReadOps by
          // the number of times listStatus iterates
int iterations = (int)Math.ceil((double)list.length/lsLimit);
largeReadOps += iterations;
readOps += iterations;
} else {
// Single iteration in listStatus - no large read operation done
readOps++;
}
// writeOps incremented by 1 for mkdirs
// readOps and largeReadOps incremented by 1 or more
checkStatistics(fs, readOps, ++writeOps, largeReadOps);
}
fs.getStatus(file1);
checkStatistics(fs, ++readOps, writeOps, largeReadOps);
fs.getFileChecksum(file1);
checkStatistics(fs, ++readOps, writeOps, largeReadOps);
fs.setPermission(file1, new FsPermission((short)0777));
checkStatistics(fs, readOps, ++writeOps, largeReadOps);
fs.setTimes(file1, 0L, 0L);
checkStatistics(fs, readOps, ++writeOps, largeReadOps);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
fs.setOwner(file1, ugi.getUserName(), ugi.getGroupNames()[0]);
checkStatistics(fs, readOps, ++writeOps, largeReadOps);
fs.delete(dir, true);
checkStatistics(fs, readOps, ++writeOps, largeReadOps);
} finally {
if (cluster != null) cluster.shutdown();
}
}
  /** Checks that the read, write and large-read statistics match the expected values. */
private void checkStatistics(FileSystem fs, int readOps, int writeOps, int largeReadOps) {
assertEquals(readOps, DFSTestUtil.getStatistics(fs).getReadOps());
assertEquals(writeOps, DFSTestUtil.getStatistics(fs).getWriteOps());
assertEquals(largeReadOps, DFSTestUtil.getStatistics(fs).getLargeReadOps());
}
@Test
public void testFileChecksum() throws Exception {
((Log4JLogger)HftpFileSystem.LOG).getLogger().setLevel(Level.ALL);
final long seed = RAN.nextLong();
System.out.println("seed=" + seed);
RAN.setSeed(seed);
final Configuration conf = getTestConfiguration();
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem hdfs = cluster.getFileSystem();
final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
current.getShortUserName() + "x", new String[]{"user"});
try {
hdfs.getFileChecksum(new Path(
"/test/TestNonExistingFile"));
fail("Expecting FileNotFoundException");
} catch (FileNotFoundException e) {
assertTrue("Not throwing the intended exception message", e.getMessage()
.contains("File does not exist: /test/TestNonExistingFile"));
}
try {
Path path = new Path("/test/TestExistingDir/");
hdfs.mkdirs(path);
hdfs.getFileChecksum(path);
fail("Expecting FileNotFoundException");
} catch (FileNotFoundException e) {
assertTrue("Not throwing the intended exception message", e.getMessage()
.contains("Path is not a file: /test/TestExistingDir"));
}
//hftp
final String hftpuri = "hftp://" + nnAddr;
System.out.println("hftpuri=" + hftpuri);
final FileSystem hftp = ugi.doAs(
new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return new Path(hftpuri).getFileSystem(conf);
}
});
//webhdfs
final String webhdfsuri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + nnAddr;
System.out.println("webhdfsuri=" + webhdfsuri);
final FileSystem webhdfs = ugi.doAs(
new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return new Path(webhdfsuri).getFileSystem(conf);
}
});
final Path dir = new Path("/filechecksum");
final int block_size = 1024;
final int buffer_size = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
//try different number of blocks
for(int n = 0; n < 5; n++) {
//generate random data
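      // length is n full blocks plus between 1 and block_size/2 - 1 extra bytes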
final byte[] data = new byte[RAN.nextInt(block_size/2-1)+n*block_size+1];
RAN.nextBytes(data);
System.out.println("data.length=" + data.length);
//write data to a file
final Path foo = new Path(dir, "foo" + n);
{
final FSDataOutputStream out = hdfs.create(foo, false, buffer_size,
(short)2, block_size);
out.write(data);
out.close();
}
//compute checksum
final FileChecksum hdfsfoocs = hdfs.getFileChecksum(foo);
System.out.println("hdfsfoocs=" + hdfsfoocs);
//hftp
final FileChecksum hftpfoocs = hftp.getFileChecksum(foo);
System.out.println("hftpfoocs=" + hftpfoocs);
final Path qualified = new Path(hftpuri + dir, "foo" + n);
final FileChecksum qfoocs = hftp.getFileChecksum(qualified);
System.out.println("qfoocs=" + qfoocs);
//webhdfs
final FileChecksum webhdfsfoocs = webhdfs.getFileChecksum(foo);
System.out.println("webhdfsfoocs=" + webhdfsfoocs);
final Path webhdfsqualified = new Path(webhdfsuri + dir, "foo" + n);
final FileChecksum webhdfs_qfoocs = webhdfs.getFileChecksum(webhdfsqualified);
System.out.println("webhdfs_qfoocs=" + webhdfs_qfoocs);
//create a zero byte file
final Path zeroByteFile = new Path(dir, "zeroByteFile" + n);
{
final FSDataOutputStream out = hdfs.create(zeroByteFile, false, buffer_size,
(short)2, block_size);
out.close();
}
// verify the magic val for zero byte files
{
final FileChecksum zeroChecksum = hdfs.getFileChecksum(zeroByteFile);
assertEquals(zeroChecksum.toString(),
"MD5-of-0MD5-of-0CRC32:70bc8f4b72a86921468bf8e8441dce51");
}
//write another file
final Path bar = new Path(dir, "bar" + n);
{
final FSDataOutputStream out = hdfs.create(bar, false, buffer_size,
(short)2, block_size);
out.write(data);
out.close();
}
{ //verify checksum
final FileChecksum barcs = hdfs.getFileChecksum(bar);
final int barhashcode = barcs.hashCode();
assertEquals(hdfsfoocs.hashCode(), barhashcode);
assertEquals(hdfsfoocs, barcs);
//hftp
assertEquals(hftpfoocs.hashCode(), barhashcode);
assertEquals(hftpfoocs, barcs);
assertEquals(qfoocs.hashCode(), barhashcode);
assertEquals(qfoocs, barcs);
//webhdfs
assertEquals(webhdfsfoocs.hashCode(), barhashcode);
assertEquals(webhdfsfoocs, barcs);
assertEquals(webhdfs_qfoocs.hashCode(), barhashcode);
assertEquals(webhdfs_qfoocs, barcs);
}
hdfs.setPermission(dir, new FsPermission((short)0));
{ //test permission error on hftp
try {
hftp.getFileChecksum(qualified);
fail();
} catch(IOException ioe) {
FileSystem.LOG.info("GOOD: getting an exception", ioe);
}
}
{ //test permission error on webhdfs
try {
webhdfs.getFileChecksum(webhdfsqualified);
fail();
} catch(IOException ioe) {
FileSystem.LOG.info("GOOD: getting an exception", ioe);
}
}
hdfs.setPermission(dir, new FsPermission((short)0777));
}
cluster.shutdown();
}
@Test
public void testAllWithDualPort() throws Exception {
dualPortTesting = true;
try {
testFileSystemCloseAll();
testDFSClose();
testDFSClient();
testFileChecksum();
} finally {
dualPortTesting = false;
}
}
@Test
public void testAllWithNoXmlDefaults() throws Exception {
// Do all the tests with a configuration that ignores the defaults in
// the XML files.
noXmlDefaults = true;
try {
testFileSystemCloseAll();
testDFSClose();
testDFSClient();
testFileChecksum();
} finally {
noXmlDefaults = false;
}
}
/**
* Tests the normal path of batching up BlockLocation[]s to be passed to a
* single
* {@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)}
* call
*/
@Test(timeout=60000)
public void testGetFileBlockStorageLocationsBatching() throws Exception {
final Configuration conf = getTestConfiguration();
((Log4JLogger)ProtobufRpcEngine.LOG).getLogger().setLevel(Level.TRACE);
((Log4JLogger)BlockStorageLocationUtil.LOG).getLogger().setLevel(Level.TRACE);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.TRACE);
conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
true);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(2).build();
try {
final DistributedFileSystem fs = cluster.getFileSystem();
// Create two files
final Path tmpFile1 = new Path("/tmpfile1.dat");
final Path tmpFile2 = new Path("/tmpfile2.dat");
      DFSTestUtil.createFile(fs, tmpFile1, 1024, (short) 2, 0xDEADDEADL);
      DFSTestUtil.createFile(fs, tmpFile2, 1024, (short) 2, 0xDEADDEADL);
// Make sure files are fully replicated before continuing
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
List<BlockLocation> list = Lists.newArrayList();
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile1, 0,
1024)));
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile2, 0,
1024)));
int totalRepl = 0;
for (BlockLocation loc : list) {
totalRepl += loc.getHosts().length;
}
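            // two files, each with replication factor 2, should yield 4 replica locations in total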
if (totalRepl == 4) {
return true;
}
} catch(IOException e) {
// swallow
}
return false;
}
}, 500, 30000);
// Get locations of blocks of both files and concat together
BlockLocation[] blockLocs1 = fs.getFileBlockLocations(tmpFile1, 0, 1024);
BlockLocation[] blockLocs2 = fs.getFileBlockLocations(tmpFile2, 0, 1024);
BlockLocation[] blockLocs = (BlockLocation[]) ArrayUtils.addAll(blockLocs1,
blockLocs2);
// Fetch VolumeBlockLocations in batch
BlockStorageLocation[] locs = fs.getFileBlockStorageLocations(Arrays
.asList(blockLocs));
int counter = 0;
// Print out the list of ids received for each block
for (BlockStorageLocation l : locs) {
for (int i = 0; i < l.getVolumeIds().length; i++) {
VolumeId id = l.getVolumeIds()[i];
String name = l.getNames()[i];
if (id != null) {
System.out.println("Datanode " + name + " has block " + counter
+ " on volume id " + id.toString());
}
}
counter++;
}
assertEquals("Expected two HdfsBlockLocations for two 1-block files", 2,
locs.length);
for (BlockStorageLocation l : locs) {
assertEquals("Expected two replicas for each block", 2,
l.getVolumeIds().length);
for (int i = 0; i < l.getVolumeIds().length; i++) {
VolumeId id = l.getVolumeIds()[i];
String name = l.getNames()[i];
assertTrue("Expected block to be valid on datanode " + name,
id != null);
}
}
} finally {
cluster.shutdown();
}
}
/**
* Tests error paths for
* {@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)}
*/
@Test(timeout=60000)
public void testGetFileBlockStorageLocationsError() throws Exception {
final Configuration conf = getTestConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
true);
conf.setInt(
DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS, 1500);
conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.getDataNodes();
final DistributedFileSystem fs = cluster.getFileSystem();
// Create a few files and add together their block locations into
// a list.
final Path tmpFile1 = new Path("/errorfile1.dat");
final Path tmpFile2 = new Path("/errorfile2.dat");
      DFSTestUtil.createFile(fs, tmpFile1, 1024, (short) 2, 0xDEADDEADL);
      DFSTestUtil.createFile(fs, tmpFile2, 1024, (short) 2, 0xDEADDEADL);
// Make sure files are fully replicated before continuing
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
List<BlockLocation> list = Lists.newArrayList();
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile1, 0,
1024)));
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile2, 0,
1024)));
int totalRepl = 0;
for (BlockLocation loc : list) {
totalRepl += loc.getHosts().length;
}
if (totalRepl == 4) {
return true;
}
} catch(IOException e) {
// swallow
}
return false;
}
}, 500, 30000);
BlockLocation[] blockLocs1 = fs.getFileBlockLocations(tmpFile1, 0, 1024);
BlockLocation[] blockLocs2 = fs.getFileBlockLocations(tmpFile2, 0, 1024);
List<BlockLocation> allLocs = Lists.newArrayList();
allLocs.addAll(Arrays.asList(blockLocs1));
allLocs.addAll(Arrays.asList(blockLocs2));
// Stall on the DN to test the timeout
DataNodeFaultInjector injector = Mockito.mock(DataNodeFaultInjector.class);
Mockito.doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
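          // sleep longer than the 1500 ms storage location timeout configured above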
Thread.sleep(3000);
return null;
}
}).when(injector).getHdfsBlocksMetadata();
DataNodeFaultInjector.instance = injector;
BlockStorageLocation[] locs = fs.getFileBlockStorageLocations(allLocs);
for (BlockStorageLocation loc: locs) {
assertEquals(
"Found more than 0 cached hosts although RPCs supposedly timed out",
0, loc.getCachedHosts().length);
}
// Restore a default injector
DataNodeFaultInjector.instance = new DataNodeFaultInjector();
// Stop a datanode to simulate a failure.
DataNodeProperties stoppedNode = cluster.stopDataNode(0);
// Fetch VolumeBlockLocations
locs = fs.getFileBlockStorageLocations(allLocs);
assertEquals("Expected two HdfsBlockLocation for two 1-block files", 2,
locs.length);
for (BlockStorageLocation l : locs) {
assertEquals("Expected two replicas for each block", 2,
l.getHosts().length);
assertEquals("Expected two VolumeIDs for each block", 2,
l.getVolumeIds().length);
assertTrue("Expected one valid and one invalid volume",
(l.getVolumeIds()[0] == null) ^ (l.getVolumeIds()[1] == null));
}
// Start the datanode again, and remove one of the blocks.
// This is a different type of failure where the block itself
// is invalid.
cluster.restartDataNode(stoppedNode, true /*keepPort*/);
cluster.waitActive();
fs.delete(tmpFile2, true);
HATestUtil.waitForNNToIssueDeletions(cluster.getNameNode());
cluster.triggerHeartbeats();
HATestUtil.waitForDNDeletions(cluster);
locs = fs.getFileBlockStorageLocations(allLocs);
assertEquals("Expected two HdfsBlockLocations for two 1-block files", 2,
locs.length);
assertNotNull(locs[0].getVolumeIds()[0]);
assertNotNull(locs[0].getVolumeIds()[1]);
assertNull(locs[1].getVolumeIds()[0]);
assertNull(locs[1].getVolumeIds()[1]);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testCreateWithCustomChecksum() throws Exception {
Configuration conf = getTestConfiguration();
MiniDFSCluster cluster = null;
Path testBasePath = new Path("/test/csum");
// create args
    Path path1 = new Path(testBasePath, "file_with_crc1");
Path path2 = new Path(testBasePath, "file_with_crc2");
ChecksumOpt opt1 = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);
ChecksumOpt opt2 = new ChecksumOpt(DataChecksum.Type.CRC32, 512);
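    // same bytes-per-checksum, different checksum algorithms, so the resulting file checksums must differ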
// common args
FsPermission perm = FsPermission.getDefault().applyUMask(
FsPermission.getUMask(conf));
EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.OVERWRITE,
CreateFlag.CREATE);
short repl = 1;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
FileSystem dfs = cluster.getFileSystem();
dfs.mkdirs(testBasePath);
// create two files with different checksum types
FSDataOutputStream out1 = dfs.create(path1, perm, flags, 4096, repl,
131072L, null, opt1);
FSDataOutputStream out2 = dfs.create(path2, perm, flags, 4096, repl,
131072L, null, opt2);
for (int i = 0; i < 1024; i++) {
out1.write(i);
out2.write(i);
}
out1.close();
out2.close();
// the two checksums must be different.
MD5MD5CRC32FileChecksum sum1 =
(MD5MD5CRC32FileChecksum)dfs.getFileChecksum(path1);
MD5MD5CRC32FileChecksum sum2 =
(MD5MD5CRC32FileChecksum)dfs.getFileChecksum(path2);
assertFalse(sum1.equals(sum2));
// check the individual params
assertEquals(DataChecksum.Type.CRC32C, sum1.getCrcType());
assertEquals(DataChecksum.Type.CRC32, sum2.getCrcType());
} finally {
if (cluster != null) {
cluster.getFileSystem().delete(testBasePath, true);
cluster.shutdown();
}
}
}
@Test(timeout=60000)
public void testFileCloseStatus() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
DistributedFileSystem fs = cluster.getFileSystem();
try {
// create a new file.
Path file = new Path("/simpleFlush.dat");
FSDataOutputStream output = fs.create(file);
// write to file
output.writeBytes("Some test data");
output.flush();
assertFalse("File status should be open", fs.isFileClosed(file));
output.close();
assertTrue("File status should be closed", fs.isFileClosed(file));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test(timeout=60000)
public void testListFiles() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
DistributedFileSystem fs = cluster.getFileSystem();
final Path relative = new Path("relative");
fs.create(new Path(relative, "foo")).close();
final List<LocatedFileStatus> retVal = new ArrayList<LocatedFileStatus>();
final RemoteIterator<LocatedFileStatus> iter = fs.listFiles(relative, true);
while (iter.hasNext()) {
retVal.add(iter.next());
}
System.out.println("retVal = " + retVal);
} finally {
cluster.shutdown();
}
}
@Test(timeout=10000)
public void testDFSClientPeerReadTimeout() throws IOException {
final int timeout = 1000;
final Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
// only need cluster to create a dfs client to get a peer
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
DistributedFileSystem dfs = cluster.getFileSystem();
      // use a dummy socket to ensure the read times out
ServerSocket socket = new ServerSocket(0);
Peer peer = dfs.getClient().newConnectedPeer(
(InetSocketAddress) socket.getLocalSocketAddress(), null, null);
long start = Time.now();
try {
peer.getInputStream().read();
Assert.fail("read should timeout");
} catch (SocketTimeoutException ste) {
long delta = Time.now() - start;
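        // allow roughly 10% slack on either side of the configured timeout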
Assert.assertTrue("read timedout too soon", delta >= timeout*0.9);
Assert.assertTrue("read timedout too late", delta <= timeout*1.1);
} catch (Throwable t) {
Assert.fail("wrong exception:"+t);
}
} finally {
cluster.shutdown();
}
}
@Test(timeout=60000)
public void testGetServerDefaults() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
DistributedFileSystem dfs = cluster.getFileSystem();
FsServerDefaults fsServerDefaults = dfs.getServerDefaults();
Assert.assertNotNull(fsServerDefaults);
} finally {
cluster.shutdown();
}
}
@Test(timeout=10000)
public void testDFSClientPeerWriteTimeout() throws IOException {
final int timeout = 1000;
final Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
// only need cluster to create a dfs client to get a peer
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
DistributedFileSystem dfs = cluster.getFileSystem();
// Write 1 MB to a dummy socket to ensure the write times out
ServerSocket socket = new ServerSocket(0);
Peer peer = dfs.getClient().newConnectedPeer(
(InetSocketAddress) socket.getLocalSocketAddress(), null, null);
long start = Time.now();
try {
byte[] buf = new byte[1024 * 1024];
peer.getOutputStream().write(buf);
Assert.fail("write should timeout");
} catch (SocketTimeoutException ste) {
long delta = Time.now() - start;
Assert.assertTrue("write timedout too soon", delta >= timeout * 0.9);
Assert.assertTrue("write timedout too late", delta <= timeout * 1.1);
} catch (Throwable t) {
Assert.fail("wrong exception:" + t);
}
} finally {
cluster.shutdown();
}
}
}
| 39,073 | 34.847706 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.*;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.blockmanagement.*;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.junit.Assert;
import org.junit.Test;
/** Test {@link BlockStoragePolicy} */
public class TestBlockStoragePolicy {
public static final BlockStoragePolicySuite POLICY_SUITE;
public static final BlockStoragePolicy DEFAULT_STORAGE_POLICY;
public static final Configuration conf;
static {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
POLICY_SUITE = BlockStoragePolicySuite.createDefaultSuite();
DEFAULT_STORAGE_POLICY = POLICY_SUITE.getDefaultPolicy();
}
static final EnumSet<StorageType> none = EnumSet.noneOf(StorageType.class);
static final EnumSet<StorageType> archive = EnumSet.of(StorageType.ARCHIVE);
static final EnumSet<StorageType> disk = EnumSet.of(StorageType.DISK);
static final EnumSet<StorageType> ssd = EnumSet.of(StorageType.SSD);
static final EnumSet<StorageType> disk_archive = EnumSet.of(StorageType.DISK,
StorageType.ARCHIVE);
static final EnumSet<StorageType> all = EnumSet.of(StorageType.SSD,
StorageType.DISK, StorageType.ARCHIVE);
static final long FILE_LEN = 1024;
static final short REPLICATION = 3;
static final byte COLD = HdfsServerConstants.COLD_STORAGE_POLICY_ID;
static final byte WARM = HdfsServerConstants.WARM_STORAGE_POLICY_ID;
static final byte HOT = HdfsServerConstants.HOT_STORAGE_POLICY_ID;
static final byte ONESSD = HdfsServerConstants.ONESSD_STORAGE_POLICY_ID;
static final byte ALLSSD = HdfsServerConstants.ALLSSD_STORAGE_POLICY_ID;
static final byte LAZY_PERSIST = HdfsServerConstants.MEMORY_STORAGE_POLICY_ID;
@Test (timeout=300000)
public void testConfigKeyEnabled() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1).build();
try {
cluster.waitActive();
cluster.getFileSystem().setStoragePolicy(new Path("/"),
HdfsConstants.COLD_STORAGE_POLICY_NAME);
} finally {
cluster.shutdown();
}
}
/**
* Ensure that setStoragePolicy throws IOException when
* dfs.storage.policy.enabled is set to false.
* @throws IOException
*/
@Test (timeout=300000, expected=IOException.class)
public void testConfigKeyDisabled() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, false);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1).build();
try {
cluster.waitActive();
cluster.getFileSystem().setStoragePolicy(new Path("/"),
HdfsConstants.COLD_STORAGE_POLICY_NAME);
} finally {
cluster.shutdown();
}
}
@Test
public void testDefaultPolicies() {
final Map<Byte, String> expectedPolicyStrings = new HashMap<Byte, String>();
expectedPolicyStrings.put(COLD,
"BlockStoragePolicy{COLD:" + COLD + ", storageTypes=[ARCHIVE], " +
"creationFallbacks=[], replicationFallbacks=[]}");
expectedPolicyStrings.put(WARM,
"BlockStoragePolicy{WARM:" + WARM + ", storageTypes=[DISK, ARCHIVE], " +
"creationFallbacks=[DISK, ARCHIVE], " +
"replicationFallbacks=[DISK, ARCHIVE]}");
expectedPolicyStrings.put(HOT,
"BlockStoragePolicy{HOT:" + HOT + ", storageTypes=[DISK], " +
"creationFallbacks=[], replicationFallbacks=[ARCHIVE]}");
expectedPolicyStrings.put(ONESSD, "BlockStoragePolicy{ONE_SSD:" + ONESSD +
", storageTypes=[SSD, DISK], creationFallbacks=[SSD, DISK], " +
"replicationFallbacks=[SSD, DISK]}");
expectedPolicyStrings.put(ALLSSD, "BlockStoragePolicy{ALL_SSD:" + ALLSSD +
", storageTypes=[SSD], creationFallbacks=[DISK], " +
"replicationFallbacks=[DISK]}");
expectedPolicyStrings.put(LAZY_PERSIST,
"BlockStoragePolicy{LAZY_PERSIST:" + LAZY_PERSIST + ", storageTypes=[RAM_DISK, DISK], " +
"creationFallbacks=[DISK], replicationFallbacks=[DISK]}");
for(byte i = 1; i < 16; i++) {
final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i);
if (policy != null) {
final String s = policy.toString();
Assert.assertEquals(expectedPolicyStrings.get(i), s);
}
}
Assert.assertEquals(POLICY_SUITE.getPolicy(HOT), POLICY_SUITE.getDefaultPolicy());
{ // check Cold policy
final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
for(short replication = 1; replication < 6; replication++) {
final List<StorageType> computed = cold.chooseStorageTypes(replication);
assertStorageType(computed, replication, StorageType.ARCHIVE);
}
assertCreationFallback(cold, null, null, null, null, null);
assertReplicationFallback(cold, null, null, null, null);
}
{ // check Warm policy
final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
for(short replication = 1; replication < 6; replication++) {
final List<StorageType> computed = warm.chooseStorageTypes(replication);
assertStorageType(computed, replication, StorageType.DISK, StorageType.ARCHIVE);
}
assertCreationFallback(warm, StorageType.DISK, StorageType.DISK,
StorageType.ARCHIVE, StorageType.DISK, null);
assertReplicationFallback(warm, StorageType.DISK, StorageType.DISK,
StorageType.ARCHIVE, StorageType.DISK);
}
{ // check Hot policy
final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
for(short replication = 1; replication < 6; replication++) {
final List<StorageType> computed = hot.chooseStorageTypes(replication);
assertStorageType(computed, replication, StorageType.DISK);
}
assertCreationFallback(hot, null, null, null, null, null);
assertReplicationFallback(hot, StorageType.ARCHIVE, null,
StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{ // check ONE_SSD policy
final BlockStoragePolicy onessd = POLICY_SUITE.getPolicy(ONESSD);
for (short replication = 1; replication < 6; replication++) {
final List<StorageType> computed = onessd
.chooseStorageTypes(replication);
assertStorageType(computed, replication, StorageType.SSD,
StorageType.DISK);
}
assertCreationFallback(onessd, StorageType.SSD, StorageType.SSD,
StorageType.SSD, StorageType.DISK, StorageType.SSD);
assertReplicationFallback(onessd, StorageType.SSD, StorageType.SSD,
StorageType.SSD, StorageType.DISK);
}
{ // check ALL_SSD policy
final BlockStoragePolicy allssd = POLICY_SUITE.getPolicy(ALLSSD);
for (short replication = 1; replication < 6; replication++) {
final List<StorageType> computed = allssd
.chooseStorageTypes(replication);
assertStorageType(computed, replication, StorageType.SSD);
}
assertCreationFallback(allssd, StorageType.DISK, StorageType.DISK, null,
StorageType.DISK, null);
assertReplicationFallback(allssd, StorageType.DISK, StorageType.DISK,
null, StorageType.DISK);
}
{ // check LAZY_PERSIST policy
final BlockStoragePolicy lazyPersist = POLICY_SUITE
.getPolicy(LAZY_PERSIST);
for (short replication = 1; replication < 6; replication++) {
final List<StorageType> computed = lazyPersist
.chooseStorageTypes(replication);
assertStorageType(computed, replication, StorageType.DISK);
}
assertCreationFallback(lazyPersist, StorageType.DISK, StorageType.DISK,
null, StorageType.DISK, null);
assertReplicationFallback(lazyPersist, StorageType.DISK,
StorageType.DISK, null, StorageType.DISK);
}
}
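  /** Builds an array of nDisk DISK entries followed by nArchive ARCHIVE entries. */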
static StorageType[] newStorageTypes(int nDisk, int nArchive) {
final StorageType[] t = new StorageType[nDisk + nArchive];
Arrays.fill(t, 0, nDisk, StorageType.DISK);
Arrays.fill(t, nDisk, t.length, StorageType.ARCHIVE);
return t;
}
static List<StorageType> asList(int nDisk, int nArchive) {
return Arrays.asList(newStorageTypes(nDisk, nArchive));
}
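  /**
   * Asserts that the computed list has exactly {@code replication} entries and
   * that positions beyond {@code answers} repeat the last expected type.
   */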
static void assertStorageType(List<StorageType> computed, short replication,
StorageType... answers) {
Assert.assertEquals(replication, computed.size());
final StorageType last = answers[answers.length - 1];
for(int i = 0; i < computed.size(); i++) {
final StorageType expected = i < answers.length? answers[i]: last;
Assert.assertEquals(expected, computed.get(i));
}
}
static void assertCreationFallback(BlockStoragePolicy policy,
StorageType noneExpected, StorageType archiveExpected,
StorageType diskExpected, StorageType ssdExpected,
StorageType disk_archiveExpected) {
Assert.assertEquals(noneExpected, policy.getCreationFallback(none));
Assert.assertEquals(archiveExpected, policy.getCreationFallback(archive));
Assert.assertEquals(diskExpected, policy.getCreationFallback(disk));
Assert.assertEquals(ssdExpected, policy.getCreationFallback(ssd));
Assert.assertEquals(disk_archiveExpected,
policy.getCreationFallback(disk_archive));
Assert.assertEquals(null, policy.getCreationFallback(all));
}
static void assertReplicationFallback(BlockStoragePolicy policy,
StorageType noneExpected, StorageType archiveExpected,
StorageType diskExpected, StorageType ssdExpected) {
Assert.assertEquals(noneExpected, policy.getReplicationFallback(none));
Assert
.assertEquals(archiveExpected, policy.getReplicationFallback(archive));
Assert.assertEquals(diskExpected, policy.getReplicationFallback(disk));
Assert.assertEquals(ssdExpected, policy.getReplicationFallback(ssd));
Assert.assertEquals(null, policy.getReplicationFallback(all));
}
private static interface CheckChooseStorageTypes {
public void checkChooseStorageTypes(BlockStoragePolicy p, short replication,
List<StorageType> chosen, StorageType... expected);
/** Basic case: pass only replication and chosen */
static final CheckChooseStorageTypes Basic = new CheckChooseStorageTypes() {
@Override
public void checkChooseStorageTypes(BlockStoragePolicy p, short replication,
List<StorageType> chosen, StorageType... expected) {
final List<StorageType> types = p.chooseStorageTypes(replication, chosen);
assertStorageTypes(types, expected);
}
};
/** With empty unavailables and isNewBlock=true */
static final CheckChooseStorageTypes EmptyUnavailablesAndNewBlock
= new CheckChooseStorageTypes() {
@Override
public void checkChooseStorageTypes(BlockStoragePolicy p,
short replication, List<StorageType> chosen, StorageType... expected) {
final List<StorageType> types = p.chooseStorageTypes(replication,
chosen, none, true);
assertStorageTypes(types, expected);
}
};
/** With empty unavailables and isNewBlock=false */
static final CheckChooseStorageTypes EmptyUnavailablesAndNonNewBlock
= new CheckChooseStorageTypes() {
@Override
public void checkChooseStorageTypes(BlockStoragePolicy p,
short replication, List<StorageType> chosen, StorageType... expected) {
final List<StorageType> types = p.chooseStorageTypes(replication,
chosen, none, false);
assertStorageTypes(types, expected);
}
};
/** With both DISK and ARCHIVE unavailables and isNewBlock=true */
static final CheckChooseStorageTypes BothUnavailableAndNewBlock
= new CheckChooseStorageTypes() {
@Override
public void checkChooseStorageTypes(BlockStoragePolicy p,
short replication, List<StorageType> chosen, StorageType... expected) {
final List<StorageType> types = p.chooseStorageTypes(replication,
chosen, disk_archive, true);
assertStorageTypes(types, expected);
}
};
/** With both DISK and ARCHIVE unavailable and isNewBlock=false */
static final CheckChooseStorageTypes BothUnavailableAndNonNewBlock
= new CheckChooseStorageTypes() {
@Override
public void checkChooseStorageTypes(BlockStoragePolicy p,
short replication, List<StorageType> chosen, StorageType... expected) {
final List<StorageType> types = p.chooseStorageTypes(replication,
chosen, disk_archive, false);
assertStorageTypes(types, expected);
}
};
/** With ARCHIVE unavailable and isNewBlock=true */
static final CheckChooseStorageTypes ArchivalUnavailableAndNewBlock
= new CheckChooseStorageTypes() {
@Override
public void checkChooseStorageTypes(BlockStoragePolicy p,
short replication, List<StorageType> chosen, StorageType... expected) {
final List<StorageType> types = p.chooseStorageTypes(replication,
chosen, archive, true);
assertStorageTypes(types, expected);
}
};
    /** With ARCHIVE unavailable and isNewBlock=false */
static final CheckChooseStorageTypes ArchivalUnavailableAndNonNewBlock
= new CheckChooseStorageTypes() {
@Override
public void checkChooseStorageTypes(BlockStoragePolicy p,
short replication, List<StorageType> chosen, StorageType... expected) {
final List<StorageType> types = p.chooseStorageTypes(replication,
chosen, archive, false);
assertStorageTypes(types, expected);
}
};
}
@Test
public void testChooseStorageTypes() {
run(CheckChooseStorageTypes.Basic);
run(CheckChooseStorageTypes.EmptyUnavailablesAndNewBlock);
run(CheckChooseStorageTypes.EmptyUnavailablesAndNonNewBlock);
}
private static void run(CheckChooseStorageTypes method) {
final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
final short replication = 3;
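    // each case below passes a different set of already-chosen storage types and
    // checks the types still required to satisfy the policy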
{
final List<StorageType> chosen = Lists.newArrayList();
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK, StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen,
StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE);
method.checkChooseStorageTypes(cold, replication, chosen,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(StorageType.DISK);
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen,
StorageType.ARCHIVE, StorageType.ARCHIVE);
method.checkChooseStorageTypes(cold, replication, chosen,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(StorageType.ARCHIVE);
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK, StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen,
StorageType.DISK, StorageType.ARCHIVE);
method.checkChooseStorageTypes(cold, replication, chosen,
StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen,
StorageType.ARCHIVE, StorageType.ARCHIVE);
method.checkChooseStorageTypes(cold, replication, chosen,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.ARCHIVE);
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen,
StorageType.ARCHIVE);
method.checkChooseStorageTypes(cold, replication, chosen,
StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.ARCHIVE, StorageType.ARCHIVE);
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK, StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen,
StorageType.DISK);
method.checkChooseStorageTypes(cold, replication, chosen,
StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(hot, replication, chosen);
method.checkChooseStorageTypes(warm, replication, chosen,
StorageType.ARCHIVE, StorageType.ARCHIVE);
method.checkChooseStorageTypes(cold, replication, chosen,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE);
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen,
StorageType.ARCHIVE);
method.checkChooseStorageTypes(cold, replication, chosen,
StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE);
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen);
method.checkChooseStorageTypes(cold, replication, chosen,
StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK, StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen,
StorageType.DISK);
method.checkChooseStorageTypes(cold, replication, chosen);
}
}
@Test
public void testChooseStorageTypesWithBothUnavailable() {
runWithBothUnavailable(CheckChooseStorageTypes.BothUnavailableAndNewBlock);
runWithBothUnavailable(CheckChooseStorageTypes.BothUnavailableAndNonNewBlock);
}
private static void runWithBothUnavailable(CheckChooseStorageTypes method) {
final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
final short replication = 3;
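    // with both DISK and ARCHIVE unavailable, no policy can choose any
    // additional storage, whatever has already been placed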
for(int n = 0; n <= 3; n++) {
for(int d = 0; d <= n; d++) {
final int a = n - d;
final List<StorageType> chosen = asList(d, a);
method.checkChooseStorageTypes(hot, replication, chosen);
method.checkChooseStorageTypes(warm, replication, chosen);
method.checkChooseStorageTypes(cold, replication, chosen);
}
}
}
@Test
public void testChooseStorageTypesWithDiskUnavailableAndNewBlock() {
final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
final short replication = 3;
final EnumSet<StorageType> unavailables = disk;
final boolean isNewBlock = true;
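    // HOT has no creation fallback, so it cannot place anything while DISK is
    // unavailable; WARM falls back to ARCHIVE and COLD already uses ARCHIVE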
{
final List<StorageType> chosen = Lists.newArrayList();
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(StorageType.DISK);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(StorageType.ARCHIVE);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.DISK);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.ARCHIVE);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.DISK, StorageType.DISK);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock);
}
}
@Test
public void testChooseStorageTypesWithArchiveUnavailable() {
runWithArchiveUnavailable(CheckChooseStorageTypes.ArchivalUnavailableAndNewBlock);
runWithArchiveUnavailable(CheckChooseStorageTypes.ArchivalUnavailableAndNonNewBlock);
}
private static void runWithArchiveUnavailable(CheckChooseStorageTypes method) {
final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
final short replication = 3;
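    // With ARCHIVE unavailable, COLD ends up choosing nothing, while HOT and
    // WARM keep choosing DISK for whatever replicas are still missing.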
{
final List<StorageType> chosen = Lists.newArrayList();
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK, StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen,
StorageType.DISK, StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(cold, replication, chosen);
}
{
final List<StorageType> chosen = Arrays.asList(StorageType.DISK);
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen,
StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(cold, replication, chosen);
}
{
final List<StorageType> chosen = Arrays.asList(StorageType.ARCHIVE);
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK, StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen,
StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(cold, replication, chosen);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen,
StorageType.DISK);
method.checkChooseStorageTypes(cold, replication, chosen);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.ARCHIVE);
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen,
StorageType.DISK);
method.checkChooseStorageTypes(cold, replication, chosen);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.ARCHIVE, StorageType.ARCHIVE);
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK, StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen,
StorageType.DISK);
method.checkChooseStorageTypes(cold, replication, chosen);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(hot, replication, chosen);
method.checkChooseStorageTypes(warm, replication, chosen);
method.checkChooseStorageTypes(cold, replication, chosen);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE);
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen);
method.checkChooseStorageTypes(cold, replication, chosen);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE);
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen);
method.checkChooseStorageTypes(cold, replication, chosen);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
method.checkChooseStorageTypes(hot, replication, chosen,
StorageType.DISK, StorageType.DISK, StorageType.DISK);
method.checkChooseStorageTypes(warm, replication, chosen,
StorageType.DISK);
method.checkChooseStorageTypes(cold, replication, chosen);
}
}
@Test
public void testChooseStorageTypesWithDiskUnavailableAndNonNewBlock() {
final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
final short replication = 3;
final EnumSet<StorageType> unavailables = disk;
final boolean isNewBlock = false;
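    // For an existing (non-new) block with DISK unavailable, all three policies
    // fall back to ARCHIVE for the replicas that are still missing.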
{
final List<StorageType> chosen = Lists.newArrayList();
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(StorageType.DISK);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(StorageType.ARCHIVE);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.DISK);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.ARCHIVE);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.DISK, StorageType.DISK);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE, StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
StorageType.ARCHIVE);
}
{
final List<StorageType> chosen = Arrays.asList(
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock);
checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock);
}
}
static void checkChooseStorageTypes(BlockStoragePolicy p, short replication,
List<StorageType> chosen, EnumSet<StorageType> unavailables,
boolean isNewBlock, StorageType... expected) {
final List<StorageType> types = p.chooseStorageTypes(replication, chosen,
unavailables, isNewBlock);
assertStorageTypes(types, expected);
}
static void assertStorageTypes(List<StorageType> computed, StorageType... expected) {
assertStorageTypes(computed.toArray(StorageType.EMPTY_ARRAY), expected);
}
static void assertStorageTypes(StorageType[] computed, StorageType... expected) {
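    // Sort both sides so the comparison is independent of the order in which
    // the storage types were chosen.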
Arrays.sort(expected);
Arrays.sort(computed);
Assert.assertArrayEquals(expected, computed);
}
@Test
public void testChooseExcess() {
final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
final short replication = 3;
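    // Enumerate every split of up to 6 existing replicas into d DISK + a ARCHIVE
    // and verify which replicas each policy reports as excess relative to the
    // target replication of 3.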
for(int n = 0; n <= 6; n++) {
for(int d = 0; d <= n; d++) {
final int a = n - d;
final List<StorageType> chosen = asList(d, a);
{
final int nDisk = Math.max(0, d - replication);
final int nArchive = a;
final StorageType[] expected = newStorageTypes(nDisk, nArchive);
checkChooseExcess(hot, replication, chosen, expected);
}
{
final int nDisk = Math.max(0, d - 1);
final int nArchive = Math.max(0, a - replication + 1);
final StorageType[] expected = newStorageTypes(nDisk, nArchive);
checkChooseExcess(warm, replication, chosen, expected);
}
{
final int nDisk = d;
          final int nArchive = Math.max(0, a - replication);
final StorageType[] expected = newStorageTypes(nDisk, nArchive);
checkChooseExcess(cold, replication, chosen, expected);
}
}
}
}
static void checkChooseExcess(BlockStoragePolicy p, short replication,
List<StorageType> chosen, StorageType... expected) {
final List<StorageType> types = p.chooseExcess(replication, chosen);
assertStorageTypes(types, expected);
}
private void checkDirectoryListing(HdfsFileStatus[] stats, byte... policies) {
Assert.assertEquals(stats.length, policies.length);
for (int i = 0; i < stats.length; i++) {
Assert.assertEquals(stats[i].getStoragePolicy(), policies[i]);
}
}
@Test
public void testSetStoragePolicy() throws Exception {
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPLICATION).build();
cluster.waitActive();
final DistributedFileSystem fs = cluster.getFileSystem();
try {
final Path dir = new Path("/testSetStoragePolicy");
final Path fooFile = new Path(dir, "foo");
final Path barDir = new Path(dir, "bar");
final Path barFile1= new Path(barDir, "f1");
final Path barFile2= new Path(barDir, "f2");
DFSTestUtil.createFile(fs, fooFile, FILE_LEN, REPLICATION, 0L);
DFSTestUtil.createFile(fs, barFile1, FILE_LEN, REPLICATION, 0L);
DFSTestUtil.createFile(fs, barFile2, FILE_LEN, REPLICATION, 0L);
final String invalidPolicyName = "INVALID-POLICY";
try {
fs.setStoragePolicy(fooFile, invalidPolicyName);
Assert.fail("Should throw a HadoopIllegalArgumentException");
} catch (RemoteException e) {
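        // The server-side HadoopIllegalArgumentException comes back wrapped in a
        // RemoteException, so check that its message names the invalid policy.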
GenericTestUtils.assertExceptionContains(invalidPolicyName, e);
}
// check storage policy
HdfsFileStatus[] dirList = fs.getClient().listPaths(dir.toString(),
HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
HdfsFileStatus[] barList = fs.getClient().listPaths(barDir.toString(),
HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
checkDirectoryListing(dirList, BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
checkDirectoryListing(barList, BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
final Path invalidPath = new Path("/invalidPath");
try {
fs.setStoragePolicy(invalidPath, HdfsConstants.WARM_STORAGE_POLICY_NAME);
Assert.fail("Should throw a FileNotFoundException");
} catch (FileNotFoundException e) {
GenericTestUtils.assertExceptionContains(invalidPath.toString(), e);
}
try {
fs.getStoragePolicy(invalidPath);
Assert.fail("Should throw a FileNotFoundException");
} catch (FileNotFoundException e) {
GenericTestUtils.assertExceptionContains(invalidPath.toString(), e);
}
fs.setStoragePolicy(fooFile, HdfsConstants.COLD_STORAGE_POLICY_NAME);
fs.setStoragePolicy(barDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
fs.setStoragePolicy(barFile2, HdfsConstants.HOT_STORAGE_POLICY_NAME);
Assert.assertEquals("File storage policy should be COLD",
HdfsConstants.COLD_STORAGE_POLICY_NAME,
fs.getStoragePolicy(fooFile).getName());
      Assert.assertEquals("Directory storage policy should be WARM",
          HdfsConstants.WARM_STORAGE_POLICY_NAME,
          fs.getStoragePolicy(barDir).getName());
Assert.assertEquals("File storage policy should be HOT",
HdfsConstants.HOT_STORAGE_POLICY_NAME,
fs.getStoragePolicy(barFile2).getName());
dirList = fs.getClient().listPaths(dir.toString(),
HdfsFileStatus.EMPTY_NAME).getPartialListing();
barList = fs.getClient().listPaths(barDir.toString(),
HdfsFileStatus.EMPTY_NAME).getPartialListing();
checkDirectoryListing(dirList, WARM, COLD); // bar is warm, foo is cold
checkDirectoryListing(barList, WARM, HOT);
// restart namenode to make sure the editlog is correct
cluster.restartNameNode(true);
dirList = fs.getClient().listPaths(dir.toString(),
HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
barList = fs.getClient().listPaths(barDir.toString(),
HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
checkDirectoryListing(dirList, WARM, COLD); // bar is warm, foo is cold
checkDirectoryListing(barList, WARM, HOT);
// restart namenode with checkpoint to make sure the fsimage is correct
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.restartNameNode(true);
dirList = fs.getClient().listPaths(dir.toString(),
HdfsFileStatus.EMPTY_NAME).getPartialListing();
barList = fs.getClient().listPaths(barDir.toString(),
HdfsFileStatus.EMPTY_NAME).getPartialListing();
checkDirectoryListing(dirList, WARM, COLD); // bar is warm, foo is cold
checkDirectoryListing(barList, WARM, HOT);
} finally {
cluster.shutdown();
}
}
@Test
public void testGetStoragePolicy() throws Exception {
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPLICATION).build();
cluster.waitActive();
final DistributedFileSystem fs = cluster.getFileSystem();
try {
final Path dir = new Path("/testGetStoragePolicy");
final Path fooFile = new Path(dir, "foo");
DFSTestUtil.createFile(fs, fooFile, FILE_LEN, REPLICATION, 0L);
DFSClient client = new DFSClient(cluster.getNameNode(0)
.getNameNodeAddress(), conf);
client.setStoragePolicy("/testGetStoragePolicy/foo",
HdfsConstants.COLD_STORAGE_POLICY_NAME);
String policyName = client.getStoragePolicy("/testGetStoragePolicy/foo")
.getName();
Assert.assertEquals("File storage policy should be COLD",
HdfsConstants.COLD_STORAGE_POLICY_NAME, policyName);
} finally {
cluster.shutdown();
}
}
@Test
public void testSetStoragePolicyWithSnapshot() throws Exception {
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPLICATION).build();
cluster.waitActive();
final DistributedFileSystem fs = cluster.getFileSystem();
try {
final Path dir = new Path("/testSetStoragePolicyWithSnapshot");
final Path fooDir = new Path(dir, "foo");
final Path fooFile1= new Path(fooDir, "f1");
final Path fooFile2= new Path(fooDir, "f2");
DFSTestUtil.createFile(fs, fooFile1, FILE_LEN, REPLICATION, 0L);
DFSTestUtil.createFile(fs, fooFile2, FILE_LEN, REPLICATION, 0L);
fs.setStoragePolicy(fooDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
HdfsFileStatus[] dirList = fs.getClient().listPaths(dir.toString(),
HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
checkDirectoryListing(dirList, WARM);
HdfsFileStatus[] fooList = fs.getClient().listPaths(fooDir.toString(),
HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
checkDirectoryListing(fooList, WARM, WARM);
// take snapshot
SnapshotTestHelper.createSnapshot(fs, dir, "s1");
// change the storage policy of fooFile1
fs.setStoragePolicy(fooFile1, HdfsConstants.COLD_STORAGE_POLICY_NAME);
fooList = fs.getClient().listPaths(fooDir.toString(),
HdfsFileStatus.EMPTY_NAME).getPartialListing();
checkDirectoryListing(fooList, COLD, WARM);
// check the policy for /dir/.snapshot/s1/foo/f1. Note we always return
// the latest storage policy for a file/directory.
Path s1f1 = SnapshotTestHelper.getSnapshotPath(dir, "s1", "foo/f1");
DirectoryListing f1Listing = fs.getClient().listPaths(s1f1.toString(),
HdfsFileStatus.EMPTY_NAME);
checkDirectoryListing(f1Listing.getPartialListing(), COLD);
// delete f1
fs.delete(fooFile1, true);
fooList = fs.getClient().listPaths(fooDir.toString(),
HdfsFileStatus.EMPTY_NAME).getPartialListing();
checkDirectoryListing(fooList, WARM);
// check the policy for /dir/.snapshot/s1/foo/f1 again after the deletion
checkDirectoryListing(fs.getClient().listPaths(s1f1.toString(),
HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD);
// change the storage policy of foo dir
fs.setStoragePolicy(fooDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
// /dir/foo is now hot
dirList = fs.getClient().listPaths(dir.toString(),
HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
checkDirectoryListing(dirList, HOT);
// /dir/foo/f2 is hot
fooList = fs.getClient().listPaths(fooDir.toString(),
HdfsFileStatus.EMPTY_NAME).getPartialListing();
checkDirectoryListing(fooList, HOT);
// check storage policy of snapshot path
Path s1 = SnapshotTestHelper.getSnapshotRoot(dir, "s1");
Path s1foo = SnapshotTestHelper.getSnapshotPath(dir, "s1", "foo");
checkDirectoryListing(fs.getClient().listPaths(s1.toString(),
HdfsFileStatus.EMPTY_NAME).getPartialListing(), HOT);
      // /dir/.snapshot/s1/foo/f1 and /dir/.snapshot/s1/foo/f2 should still
      // follow the latest storage policies (COLD and HOT respectively)
checkDirectoryListing(fs.getClient().listPaths(s1foo.toString(),
HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD, HOT);
// delete foo
fs.delete(fooDir, true);
checkDirectoryListing(fs.getClient().listPaths(s1.toString(),
HdfsFileStatus.EMPTY_NAME).getPartialListing(), HOT);
checkDirectoryListing(fs.getClient().listPaths(s1foo.toString(),
HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD, HOT);
} finally {
cluster.shutdown();
}
}
private static StorageType[][] genStorageTypes(int numDataNodes) {
StorageType[][] types = new StorageType[numDataNodes][];
for (int i = 0; i < types.length; i++) {
types[i] = new StorageType[]{StorageType.DISK, StorageType.ARCHIVE};
}
return types;
}
private void checkLocatedBlocks(HdfsLocatedFileStatus status, int blockNum,
int replicaNum, StorageType... types) {
List<StorageType> typeList = Lists.newArrayList();
Collections.addAll(typeList, types);
LocatedBlocks lbs = status.getBlockLocations();
Assert.assertEquals(blockNum, lbs.getLocatedBlocks().size());
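    // Treat the expected types as a multiset: each replica must consume exactly
    // one matching entry and nothing may be left over at the end.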
for (LocatedBlock lb : lbs.getLocatedBlocks()) {
Assert.assertEquals(replicaNum, lb.getStorageTypes().length);
for (StorageType type : lb.getStorageTypes()) {
Assert.assertTrue(typeList.remove(type));
}
}
Assert.assertTrue(typeList.isEmpty());
}
private void testChangeFileRep(String policyName, byte policyId,
StorageType[] before,
StorageType[] after) throws Exception {
final int numDataNodes = 5;
final StorageType[][] types = genStorageTypes(numDataNodes);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDataNodes).storageTypes(types).build();
cluster.waitActive();
final DistributedFileSystem fs = cluster.getFileSystem();
try {
final Path dir = new Path("/test");
fs.mkdirs(dir);
fs.setStoragePolicy(dir, policyName);
final Path foo = new Path(dir, "foo");
DFSTestUtil.createFile(fs, foo, FILE_LEN, REPLICATION, 0L);
HdfsFileStatus[] status = fs.getClient().listPaths(foo.toString(),
HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
checkDirectoryListing(status, policyId);
HdfsLocatedFileStatus fooStatus = (HdfsLocatedFileStatus) status[0];
checkLocatedBlocks(fooStatus, 1, 3, before);
// change the replication factor to 5
fs.setReplication(foo, (short) numDataNodes);
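      // Give the NameNode a moment to schedule the extra replicas, then force
      // heartbeats so the DataNodes pick up the replication work without
      // waiting for the regular heartbeat interval.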
Thread.sleep(1000);
for (DataNode dn : cluster.getDataNodes()) {
DataNodeTestUtils.triggerHeartbeat(dn);
}
Thread.sleep(1000);
status = fs.getClient().listPaths(foo.toString(),
HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
checkDirectoryListing(status, policyId);
fooStatus = (HdfsLocatedFileStatus) status[0];
checkLocatedBlocks(fooStatus, 1, numDataNodes, after);
// change the replication factor back to 3
fs.setReplication(foo, REPLICATION);
Thread.sleep(1000);
for (DataNode dn : cluster.getDataNodes()) {
DataNodeTestUtils.triggerHeartbeat(dn);
}
Thread.sleep(1000);
for (DataNode dn : cluster.getDataNodes()) {
DataNodeTestUtils.triggerBlockReport(dn);
}
Thread.sleep(1000);
status = fs.getClient().listPaths(foo.toString(),
HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
checkDirectoryListing(status, policyId);
fooStatus = (HdfsLocatedFileStatus) status[0];
checkLocatedBlocks(fooStatus, 1, REPLICATION, before);
} finally {
cluster.shutdown();
}
}
/**
   * Consider a file with the HOT storage policy. Increase the replication
   * factor of that file from 3 to 5. Make sure all replicas are created on DISK.
*/
@Test
public void testChangeHotFileRep() throws Exception {
testChangeFileRep(HdfsConstants.HOT_STORAGE_POLICY_NAME, HOT,
new StorageType[]{StorageType.DISK, StorageType.DISK,
StorageType.DISK},
new StorageType[]{StorageType.DISK, StorageType.DISK, StorageType.DISK,
StorageType.DISK, StorageType.DISK});
}
/**
   * Consider a file with the WARM storage policy. Increase the replication
   * factor of that file from 3 to 5. Make sure all replicas are created on
   * DISK and ARCHIVE.
*/
@Test
public void testChangeWarmRep() throws Exception {
testChangeFileRep(HdfsConstants.WARM_STORAGE_POLICY_NAME, WARM,
new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
StorageType.ARCHIVE},
new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE});
}
/**
   * Consider a file with the COLD storage policy. Increase the replication
   * factor of that file from 3 to 5. Make sure all replicas are created on
   * ARCHIVE.
*/
@Test
public void testChangeColdRep() throws Exception {
testChangeFileRep(HdfsConstants.COLD_STORAGE_POLICY_NAME, COLD,
new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,
StorageType.ARCHIVE},
new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE});
}
@Test
public void testChooseTargetWithTopology() throws Exception {
BlockStoragePolicy policy1 = new BlockStoragePolicy((byte) 9, "TEST1",
new StorageType[]{StorageType.SSD, StorageType.DISK,
StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
BlockStoragePolicy policy2 = new BlockStoragePolicy((byte) 11, "TEST2",
new StorageType[]{StorageType.DISK, StorageType.SSD,
StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
final String[] racks = {"/d1/r1", "/d1/r2", "/d1/r2"};
final String[] hosts = {"host1", "host2", "host3"};
final StorageType[] types = {StorageType.DISK, StorageType.SSD,
StorageType.ARCHIVE};
final DatanodeStorageInfo[] storages = DFSTestUtil
.createDatanodeStorageInfos(3, racks, hosts, types);
final DatanodeDescriptor[] dataNodes = DFSTestUtil
.toDatanodeDescriptor(storages);
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
new File(baseDir, "name").getPath());
DFSTestUtil.formatNameNode(conf);
NameNode namenode = new NameNode(conf);
final BlockManager bm = namenode.getNamesystem().getBlockManager();
BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
for (DatanodeDescriptor datanode : dataNodes) {
cluster.add(datanode);
}
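    // Whichever storage type each policy prefers first, the placement policy
    // should still find three targets on this three-node topology.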
DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
new HashSet<Node>(), 0, policy1);
System.out.println(Arrays.asList(targets));
Assert.assertEquals(3, targets.length);
targets = replicator.chooseTarget("/foo", 3,
dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
new HashSet<Node>(), 0, policy2);
System.out.println(Arrays.asList(targets));
Assert.assertEquals(3, targets.length);
}
/**
* Test getting all the storage policies from the namenode
*/
@Test
public void testGetAllStoragePolicies() throws Exception {
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0).build();
cluster.waitActive();
final DistributedFileSystem fs = cluster.getFileSystem();
try {
BlockStoragePolicy[] policies = fs.getStoragePolicies();
Assert.assertEquals(6, policies.length);
Assert.assertEquals(POLICY_SUITE.getPolicy(COLD).toString(),
policies[0].toString());
Assert.assertEquals(POLICY_SUITE.getPolicy(WARM).toString(),
policies[1].toString());
Assert.assertEquals(POLICY_SUITE.getPolicy(HOT).toString(),
policies[2].toString());
} finally {
IOUtils.cleanup(null, fs);
cluster.shutdown();
}
}
@Test
public void testChooseSsdOverDisk() throws Exception {
BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
new StorageType[]{StorageType.SSD, StorageType.DISK,
StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
final String[] hosts = {"host1", "host2", "host3"};
final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};
final DatanodeStorageInfo[] diskStorages
= DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
final DatanodeDescriptor[] dataNodes
= DFSTestUtil.toDatanodeDescriptor(diskStorages);
for(int i = 0; i < dataNodes.length; i++) {
BlockManagerTestUtil.updateStorage(dataNodes[i],
new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL,
StorageType.SSD));
}
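    // Every DataNode now reports an SSD storage alongside its DISK storage,
    // giving the SSD-first policy somewhere to place the preferred replica.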
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
new File(baseDir, "name").getPath());
DFSTestUtil.formatNameNode(conf);
NameNode namenode = new NameNode(conf);
final BlockManager bm = namenode.getNamesystem().getBlockManager();
BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
for (DatanodeDescriptor datanode : dataNodes) {
cluster.add(datanode);
}
DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
new HashSet<Node>(), 0, policy);
System.out.println(policy.getName() + ": " + Arrays.asList(targets));
Assert.assertEquals(2, targets.length);
Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
}
@Test
public void testGetFileStoragePolicyAfterRestartNN() throws Exception {
    // HDFS-8219
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPLICATION)
.storageTypes(
new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
.build();
cluster.waitActive();
final DistributedFileSystem fs = cluster.getFileSystem();
try {
final String file = "/testScheduleWithinSameNode/file";
Path dir = new Path("/testScheduleWithinSameNode");
fs.mkdirs(dir);
// 2. Set Dir policy
fs.setStoragePolicy(dir, "COLD");
// 3. Create file
final FSDataOutputStream out = fs.create(new Path(file));
out.writeChars("testScheduleWithinSameNode");
out.close();
// 4. Set Dir policy
fs.setStoragePolicy(dir, "HOT");
HdfsFileStatus status = fs.getClient().getFileInfo(file);
// 5. get file policy, it should be parent policy.
      Assert.assertTrue("File storage policy should be HOT",
          status.getStoragePolicy() == HdfsServerConstants.HOT_STORAGE_POLICY_ID);
// 6. restart NameNode for reloading edits logs.
cluster.restartNameNode(true);
// 7. get file policy, it should be parent policy.
status = fs.getClient().getFileInfo(file);
      Assert.assertTrue("File storage policy should be HOT",
          status.getStoragePolicy() == HdfsServerConstants.HOT_STORAGE_POLICY_ID);
} finally {
cluster.shutdown();
}
}
/**
* Verify that {@link FileSystem#getAllStoragePolicies} returns all
* known storage policies for DFS.
*
* @throws IOException
*/
@Test
public void testGetAllStoragePoliciesFromFs() throws IOException {
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPLICATION)
.storageTypes(
new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
.build();
try {
cluster.waitActive();
// Get policies via {@link FileSystem#getAllStoragePolicies}
Set<String> policyNamesSet1 = new HashSet<>();
for (BlockStoragePolicySpi policy :
cluster.getFileSystem().getAllStoragePolicies()) {
policyNamesSet1.add(policy.getName());
}
// Get policies from the default BlockStoragePolicySuite.
BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
Set<String> policyNamesSet2 = new HashSet<>();
for (BlockStoragePolicy policy : suite.getAllPolicies()) {
policyNamesSet2.add(policy.getName());
}
// Ensure that we got the same set of policies in both cases.
Assert.assertTrue(Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
Assert.assertTrue(Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
} finally {
cluster.shutdown();
}
}
@Test
public void testStorageType() {
final EnumMap<StorageType, Integer> map = new EnumMap<>(StorageType.class);
    // put storage types in reversed order
map.put(StorageType.ARCHIVE, 1);
map.put(StorageType.DISK, 1);
map.put(StorageType.SSD, 1);
map.put(StorageType.RAM_DISK, 1);
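    // EnumMap iterates keys in the enum's declaration order regardless of
    // insertion order, so the assertions below pin down the declared order:
    // RAM_DISK, SSD, DISK, ARCHIVE.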
{
final Iterator<StorageType> i = map.keySet().iterator();
Assert.assertEquals(StorageType.RAM_DISK, i.next());
Assert.assertEquals(StorageType.SSD, i.next());
Assert.assertEquals(StorageType.DISK, i.next());
Assert.assertEquals(StorageType.ARCHIVE, i.next());
}
{
final Iterator<Map.Entry<StorageType, Integer>> i
= map.entrySet().iterator();
Assert.assertEquals(StorageType.RAM_DISK, i.next().getKey());
Assert.assertEquals(StorageType.SSD, i.next().getKey());
Assert.assertEquals(StorageType.DISK, i.next().getKey());
Assert.assertEquals(StorageType.ARCHIVE, i.next().getKey());
}
}
}
| 61,702 | 42.148951 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitLegacyRead.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public class TestParallelShortCircuitLegacyRead extends TestParallelReadUtil {
@BeforeClass
static public void setupCluster() throws Exception {
DFSInputStream.tcpReadsDisabledForTesting = true;
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, "");
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
false);
conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
UserGroupInformation.getCurrentUser().getShortUserName());
DomainSocket.disableBindPathValidation();
setupCluster(1, conf);
}
@AfterClass
static public void teardownCluster() throws Exception {
TestParallelReadUtil.teardownCluster();
}
}
| 2,039 | 41.5 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.OutputStream;
import java.io.PrintStream;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Assert;
import org.junit.Test;
import com.google.common.base.Charsets;
/** A class for testing quota-related commands */
public class TestQuota {
private void runCommand(DFSAdmin admin, boolean expectError, String... args)
throws Exception {
runCommand(admin, args, expectError);
}
  private void runCommand(DFSAdmin admin, String[] args, boolean expectError)
      throws Exception {
    int val = admin.run(args);
    if (expectError) {
      assertEquals(-1, val);
    } else {
      assertTrue(val >= 0);
}
}
/**
   * Tests to make sure we're getting human-readable quota exception messages.
   * Test for {@link NSQuotaExceededException} and {@link DSQuotaExceededException}.
* @throws Exception
*/
@Test
public void testDSQuotaExceededExceptionIsHumanReadable() throws Exception {
Integer bytes = 1024;
try {
throw new DSQuotaExceededException(bytes, bytes);
} catch(DSQuotaExceededException e) {
assertEquals("The DiskSpace quota is exceeded: quota = 1024 B = 1 KB"
+ " but diskspace consumed = 1024 B = 1 KB", e.getMessage());
}
}
/** Test quota related commands:
* setQuota, clrQuota, setSpaceQuota, clrSpaceQuota, and count
*/
@Test
public void testQuotaCommands() throws Exception {
final Configuration conf = new HdfsConfiguration();
// set a smaller block size so that we can test with smaller
// Space quotas
final int DEFAULT_BLOCK_SIZE = 512;
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
// Make it relinquish locks. When run serially, the result should
// be identical.
conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem fs = cluster.getFileSystem();
    assertTrue("Not an HDFS: " + fs.getUri(),
        fs instanceof DistributedFileSystem);
final DistributedFileSystem dfs = (DistributedFileSystem)fs;
DFSAdmin admin = new DFSAdmin(conf);
try {
final int fileLen = 1024;
final short replication = 5;
final long spaceQuota = fileLen * replication * 15 / 8;
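      // spaceQuota = 1024 * 5 * 15 / 8 = 9600 bytes: enough raw space for one
      // 1KB file at replication 5 (5120 bytes), but not for a second one.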
// 1: create a directory /test and set its quota to be 3
final Path parent = new Path("/test");
assertTrue(dfs.mkdirs(parent));
String[] args = new String[]{"-setQuota", "3", parent.toString()};
runCommand(admin, args, false);
//try setting space quota with a 'binary prefix'
runCommand(admin, false, "-setSpaceQuota", "2t", parent.toString());
assertEquals(2L<<40, dfs.getContentSummary(parent).getSpaceQuota());
      // set the diskspace quota of /test to spaceQuota
runCommand(admin, false, "-setSpaceQuota",
Long.toString(spaceQuota), parent.toString());
// 2: create directory /test/data0
final Path childDir0 = new Path(parent, "data0");
assertTrue(dfs.mkdirs(childDir0));
// 3: create a file /test/datafile0
final Path childFile0 = new Path(parent, "datafile0");
DFSTestUtil.createFile(fs, childFile0, fileLen, replication, 0);
// 4: count -q /test
ContentSummary c = dfs.getContentSummary(parent);
assertEquals(c.getFileCount()+c.getDirectoryCount(), 3);
assertEquals(c.getQuota(), 3);
assertEquals(c.getSpaceConsumed(), fileLen*replication);
assertEquals(c.getSpaceQuota(), spaceQuota);
// 5: count -q /test/data0
c = dfs.getContentSummary(childDir0);
assertEquals(c.getFileCount()+c.getDirectoryCount(), 1);
assertEquals(c.getQuota(), -1);
// check disk space consumed
c = dfs.getContentSummary(parent);
assertEquals(c.getSpaceConsumed(), fileLen*replication);
// 6: create a directory /test/data1
final Path childDir1 = new Path(parent, "data1");
boolean hasException = false;
try {
assertFalse(dfs.mkdirs(childDir1));
} catch (QuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
OutputStream fout;
// 7: create a file /test/datafile1
final Path childFile1 = new Path(parent, "datafile1");
hasException = false;
try {
fout = dfs.create(childFile1);
} catch (QuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
// 8: clear quota /test
runCommand(admin, new String[]{"-clrQuota", parent.toString()}, false);
c = dfs.getContentSummary(parent);
assertEquals(c.getQuota(), -1);
assertEquals(c.getSpaceQuota(), spaceQuota);
// 9: clear quota /test/data0
runCommand(admin, new String[]{"-clrQuota", childDir0.toString()}, false);
c = dfs.getContentSummary(childDir0);
assertEquals(c.getQuota(), -1);
// 10: create a file /test/datafile1
fout = dfs.create(childFile1, replication);
      // 10.s: but writing fileLen bytes should result in a quota exception
try {
fout.write(new byte[fileLen]);
fout.close();
Assert.fail();
} catch (QuotaExceededException e) {
IOUtils.closeStream(fout);
}
//delete the file
dfs.delete(childFile1, false);
// 9.s: clear diskspace quota
runCommand(admin, false, "-clrSpaceQuota", parent.toString());
c = dfs.getContentSummary(parent);
assertEquals(c.getQuota(), -1);
assertEquals(c.getSpaceQuota(), -1);
// now creating childFile1 should succeed
DFSTestUtil.createFile(dfs, childFile1, fileLen, replication, 0);
// 11: set the quota of /test to be 1
// HADOOP-5872 - we can set quota even if it is immediately violated
args = new String[]{"-setQuota", "1", parent.toString()};
runCommand(admin, args, false);
runCommand(admin, false, "-setSpaceQuota", // for space quota
Integer.toString(fileLen), args[2]);
// 12: set the quota of /test/data0 to be 1
args = new String[]{"-setQuota", "1", childDir0.toString()};
runCommand(admin, args, false);
      // 13: not able to create a directory under data0
hasException = false;
try {
assertFalse(dfs.mkdirs(new Path(childDir0, "in")));
} catch (QuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
c = dfs.getContentSummary(childDir0);
assertEquals(c.getDirectoryCount()+c.getFileCount(), 1);
assertEquals(c.getQuota(), 1);
// 14a: set quota on a non-existent directory
Path nonExistentPath = new Path("/test1");
assertFalse(dfs.exists(nonExistentPath));
args = new String[]{"-setQuota", "1", nonExistentPath.toString()};
runCommand(admin, args, true);
runCommand(admin, true, "-setSpaceQuota", "1g", // for space quota
nonExistentPath.toString());
// 14b: set quota on a file
assertTrue(dfs.isFile(childFile0));
args[1] = childFile0.toString();
runCommand(admin, args, true);
// same for space quota
runCommand(admin, true, "-setSpaceQuota", "1t", args[1]);
// 15a: clear quota on a file
args[0] = "-clrQuota";
runCommand(admin, args, true);
runCommand(admin, true, "-clrSpaceQuota", args[1]);
// 15b: clear quota on a non-existent directory
args[1] = nonExistentPath.toString();
runCommand(admin, args, true);
runCommand(admin, true, "-clrSpaceQuota", args[1]);
// 16a: set the quota of /test to be 0
args = new String[]{"-setQuota", "0", parent.toString()};
runCommand(admin, args, true);
runCommand(admin, true, "-setSpaceQuota", "0", args[2]);
// 16b: set the quota of /test to be -1
args[1] = "-1";
runCommand(admin, args, true);
runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
// 16c: set the quota of /test to be Long.MAX_VALUE+1
args[1] = String.valueOf(Long.MAX_VALUE+1L);
runCommand(admin, args, true);
runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
// 16d: set the quota of /test to be a non integer
args[1] = "33aa1.5";
runCommand(admin, args, true);
runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
// 16e: set space quota with a value larger than Long.MAX_VALUE
runCommand(admin, true, "-setSpaceQuota",
(Long.MAX_VALUE/1024/1024 + 1024) + "m", args[2]);
// 17: setQuota by a non-administrator
final String username = "userxx";
UserGroupInformation ugi =
UserGroupInformation.createUserForTesting(username,
new String[]{"groupyy"});
final String[] args2 = args.clone(); // need final ref for doAs block
ugi.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
assertEquals("Not running as new user", username,
UserGroupInformation.getCurrentUser().getShortUserName());
DFSAdmin userAdmin = new DFSAdmin(conf);
args2[1] = "100";
runCommand(userAdmin, args2, true);
runCommand(userAdmin, true, "-setSpaceQuota", "1g", args2[2]);
// 18: clrQuota by a non-administrator
String[] args3 = new String[] {"-clrQuota", parent.toString()};
runCommand(userAdmin, args3, true);
runCommand(userAdmin, true, "-clrSpaceQuota", args3[1]);
return null;
}
});
// 19: clrQuota on the root directory ("/") should fail
runCommand(admin, true, "-clrQuota", "/");
// 20: setQuota on the root directory ("/") should succeed
runCommand(admin, false, "-setQuota", "1000000", "/");
runCommand(admin, true, "-clrQuota", "/");
runCommand(admin, false, "-clrSpaceQuota", "/");
runCommand(admin, new String[]{"-clrQuota", parent.toString()}, false);
runCommand(admin, false, "-clrSpaceQuota", parent.toString());
// 2: create directory /test/data2
final Path childDir2 = new Path(parent, "data2");
assertTrue(dfs.mkdirs(childDir2));
final Path childFile2 = new Path(childDir2, "datafile2");
final Path childFile3 = new Path(childDir2, "datafile3");
final long spaceQuota2 = DEFAULT_BLOCK_SIZE * replication;
final long fileLen2 = DEFAULT_BLOCK_SIZE;
// set space quota to a real low value
runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), childDir2.toString());
// clear space quota
runCommand(admin, false, "-clrSpaceQuota", childDir2.toString());
// create a file that is greater than the size of space quota
DFSTestUtil.createFile(fs, childFile2, fileLen2, replication, 0);
// now set space quota again. This should succeed
runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), childDir2.toString());
hasException = false;
try {
DFSTestUtil.createFile(fs, childFile3, fileLen2, replication, 0);
} catch (DSQuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
// now test the same for root
final Path childFile4 = new Path("/", "datafile2");
final Path childFile5 = new Path("/", "datafile3");
runCommand(admin, true, "-clrQuota", "/");
runCommand(admin, false, "-clrSpaceQuota", "/");
// set space quota to a real low value
runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), "/");
runCommand(admin, false, "-clrSpaceQuota", "/");
DFSTestUtil.createFile(fs, childFile4, fileLen2, replication, 0);
runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), "/");
hasException = false;
try {
DFSTestUtil.createFile(fs, childFile5, fileLen2, replication, 0);
} catch (DSQuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
assertEquals(4, cluster.getNamesystem().getFSDirectory().getYieldCount());
} finally {
cluster.shutdown();
}
}
/** Test commands that change the size of the name space:
* mkdirs, rename, and delete */
@Test
public void testNamespaceCommands() throws Exception {
final Configuration conf = new HdfsConfiguration();
// Make it relinquish locks. When run serially, the result should
// be identical.
conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final DistributedFileSystem dfs = cluster.getFileSystem();
try {
// 1: create directory /nqdir0/qdir1/qdir20/nqdir30
assertTrue(dfs.mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30")));
// 2: set the quota of /nqdir0/qdir1 to be 6
final Path quotaDir1 = new Path("/nqdir0/qdir1");
dfs.setQuota(quotaDir1, 6, HdfsConstants.QUOTA_DONT_SET);
ContentSummary c = dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(), 3);
assertEquals(c.getQuota(), 6);
// 3: set the quota of /nqdir0/qdir1/qdir20 to be 7
final Path quotaDir2 = new Path("/nqdir0/qdir1/qdir20");
dfs.setQuota(quotaDir2, 7, HdfsConstants.QUOTA_DONT_SET);
c = dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(), 2);
assertEquals(c.getQuota(), 7);
// 4: Create directory /nqdir0/qdir1/qdir21 and set its quota to 2
final Path quotaDir3 = new Path("/nqdir0/qdir1/qdir21");
assertTrue(dfs.mkdirs(quotaDir3));
dfs.setQuota(quotaDir3, 2, HdfsConstants.QUOTA_DONT_SET);
c = dfs.getContentSummary(quotaDir3);
assertEquals(c.getDirectoryCount(), 1);
assertEquals(c.getQuota(), 2);
// 5: Create directory /nqdir0/qdir1/qdir21/nqdir32
Path tempPath = new Path(quotaDir3, "nqdir32");
assertTrue(dfs.mkdirs(tempPath));
c = dfs.getContentSummary(quotaDir3);
assertEquals(c.getDirectoryCount(), 2);
assertEquals(c.getQuota(), 2);
// 6: Create directory /nqdir0/qdir1/qdir21/nqdir33
tempPath = new Path(quotaDir3, "nqdir33");
boolean hasException = false;
try {
assertFalse(dfs.mkdirs(tempPath));
} catch (NSQuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
c = dfs.getContentSummary(quotaDir3);
assertEquals(c.getDirectoryCount(), 2);
assertEquals(c.getQuota(), 2);
// 7: Create directory /nqdir0/qdir1/qdir20/nqdir31
tempPath = new Path(quotaDir2, "nqdir31");
assertTrue(dfs.mkdirs(tempPath));
c = dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(), 3);
assertEquals(c.getQuota(), 7);
c = dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(), 6);
assertEquals(c.getQuota(), 6);
// 8: Create directory /nqdir0/qdir1/qdir20/nqdir33
tempPath = new Path(quotaDir2, "nqdir33");
hasException = false;
try {
assertFalse(dfs.mkdirs(tempPath));
} catch (NSQuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
// 9: Move /nqdir0/qdir1/qdir21/nqdir32 /nqdir0/qdir1/qdir20/nqdir30
tempPath = new Path(quotaDir2, "nqdir30");
dfs.rename(new Path(quotaDir3, "nqdir32"), tempPath);
c = dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(), 4);
assertEquals(c.getQuota(), 7);
c = dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(), 6);
assertEquals(c.getQuota(), 6);
// 10: Move /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0/qdir1/qdir21
hasException = false;
try {
assertFalse(dfs.rename(tempPath, quotaDir3));
} catch (NSQuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
assertTrue(dfs.exists(tempPath));
assertFalse(dfs.exists(new Path(quotaDir3, "nqdir30")));
// 10.a: Rename /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0/qdir1/qdir21/nqdir32
hasException = false;
try {
assertFalse(dfs.rename(tempPath, new Path(quotaDir3, "nqdir32")));
} catch (QuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
assertTrue(dfs.exists(tempPath));
assertFalse(dfs.exists(new Path(quotaDir3, "nqdir32")));
// 11: Move /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0
assertTrue(dfs.rename(tempPath, new Path("/nqdir0")));
c = dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(), 2);
assertEquals(c.getQuota(), 7);
c = dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(), 4);
assertEquals(c.getQuota(), 6);
// 12: Create directory /nqdir0/nqdir30/nqdir33
assertTrue(dfs.mkdirs(new Path("/nqdir0/nqdir30/nqdir33")));
// 13: Move /nqdir0/nqdir30 /nqdir0/qdir1/qdir20/qdir30
hasException = false;
try {
assertFalse(dfs.rename(new Path("/nqdir0/nqdir30"), tempPath));
} catch (NSQuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
// 14: Move /nqdir0/qdir1/qdir21 /nqdir0/qdir1/qdir20
assertTrue(dfs.rename(quotaDir3, quotaDir2));
c = dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(), 4);
assertEquals(c.getQuota(), 6);
c = dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(), 3);
assertEquals(c.getQuota(), 7);
tempPath = new Path(quotaDir2, "qdir21");
c = dfs.getContentSummary(tempPath);
assertEquals(c.getDirectoryCount(), 1);
assertEquals(c.getQuota(), 2);
// 15: Delete /nqdir0/qdir1/qdir20/qdir21
dfs.delete(tempPath, true);
c = dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(), 2);
assertEquals(c.getQuota(), 7);
c = dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(), 3);
assertEquals(c.getQuota(), 6);
// 16: Move /nqdir0/qdir30 /nqdir0/qdir1/qdir20
assertTrue(dfs.rename(new Path("/nqdir0/nqdir30"), quotaDir2));
c = dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(), 5);
assertEquals(c.getQuota(), 7);
c = dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(), 6);
assertEquals(c.getQuota(), 6);
assertEquals(14, cluster.getNamesystem().getFSDirectory().getYieldCount());
} finally {
cluster.shutdown();
}
}
/**
   * Test HDFS operations that change the disk space consumed by a directory
   * tree, namely create, rename, delete, append, and setReplication.
*
* This is based on testNamespaceCommands() above.
*/
@Test
public void testSpaceCommands() throws Exception {
final Configuration conf = new HdfsConfiguration();
// set a smaller block size so that we can test with smaller
// diskspace quotas
conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "512");
// Make it relinquish locks. When run serially, the result should
// be identical.
conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem fs = cluster.getFileSystem();
    assertTrue("Not an HDFS: " + fs.getUri(),
        fs instanceof DistributedFileSystem);
final DistributedFileSystem dfs = (DistributedFileSystem)fs;
try {
int fileLen = 1024;
short replication = 3;
int fileSpace = fileLen * replication;
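      // fileSpace = 1024 * 3 = 3072 raw bytes per test file; the space quotas
      // below are expressed as multiples of this value.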
// create directory /nqdir0/qdir1/qdir20/nqdir30
assertTrue(dfs.mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30")));
// set the quota of /nqdir0/qdir1 to 4 * fileSpace
final Path quotaDir1 = new Path("/nqdir0/qdir1");
dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 4 * fileSpace);
ContentSummary c = dfs.getContentSummary(quotaDir1);
assertEquals(c.getSpaceQuota(), 4 * fileSpace);
// set the quota of /nqdir0/qdir1/qdir20 to 6 * fileSpace
final Path quotaDir20 = new Path("/nqdir0/qdir1/qdir20");
dfs.setQuota(quotaDir20, HdfsConstants.QUOTA_DONT_SET, 6 * fileSpace);
c = dfs.getContentSummary(quotaDir20);
assertEquals(c.getSpaceQuota(), 6 * fileSpace);
// Create /nqdir0/qdir1/qdir21 and set its space quota to 2 * fileSpace
final Path quotaDir21 = new Path("/nqdir0/qdir1/qdir21");
assertTrue(dfs.mkdirs(quotaDir21));
dfs.setQuota(quotaDir21, HdfsConstants.QUOTA_DONT_SET, 2 * fileSpace);
c = dfs.getContentSummary(quotaDir21);
assertEquals(c.getSpaceQuota(), 2 * fileSpace);
// 5: Create directory /nqdir0/qdir1/qdir21/nqdir32
Path tempPath = new Path(quotaDir21, "nqdir32");
assertTrue(dfs.mkdirs(tempPath));
// create a file under nqdir32/fileDir
DFSTestUtil.createFile(dfs, new Path(tempPath, "fileDir/file1"), fileLen,
replication, 0);
c = dfs.getContentSummary(quotaDir21);
assertEquals(c.getSpaceConsumed(), fileSpace);
// Create a larger file /nqdir0/qdir1/qdir21/nqdir33/
boolean hasException = false;
try {
DFSTestUtil.createFile(dfs, new Path(quotaDir21, "nqdir33/file2"),
2*fileLen, replication, 0);
} catch (DSQuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
// delete nqdir33
assertTrue(dfs.delete(new Path(quotaDir21, "nqdir33"), true));
c = dfs.getContentSummary(quotaDir21);
assertEquals(c.getSpaceConsumed(), fileSpace);
assertEquals(c.getSpaceQuota(), 2*fileSpace);
// Verify space before the move:
c = dfs.getContentSummary(quotaDir20);
assertEquals(c.getSpaceConsumed(), 0);
// Move /nqdir0/qdir1/qdir21/nqdir32 /nqdir0/qdir1/qdir20/nqdir30
Path dstPath = new Path(quotaDir20, "nqdir30");
Path srcPath = new Path(quotaDir21, "nqdir32");
assertTrue(dfs.rename(srcPath, dstPath));
// verify space after the move
c = dfs.getContentSummary(quotaDir20);
assertEquals(c.getSpaceConsumed(), fileSpace);
// verify space for its parent
c = dfs.getContentSummary(quotaDir1);
assertEquals(c.getSpaceConsumed(), fileSpace);
// verify space for source for the move
c = dfs.getContentSummary(quotaDir21);
assertEquals(c.getSpaceConsumed(), 0);
final Path file2 = new Path(dstPath, "fileDir/file2");
int file2Len = 2 * fileLen;
// create a larger file under /nqdir0/qdir1/qdir20/nqdir30
DFSTestUtil.createFile(dfs, file2, file2Len, replication, 0);
c = dfs.getContentSummary(quotaDir20);
assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
c = dfs.getContentSummary(quotaDir21);
assertEquals(c.getSpaceConsumed(), 0);
// Reverse: Move /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0/qdir1/qdir21/
hasException = false;
try {
assertFalse(dfs.rename(dstPath, srcPath));
} catch (DSQuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
// make sure no intermediate directories left by failed rename
assertFalse(dfs.exists(srcPath));
// directory should exist
assertTrue(dfs.exists(dstPath));
// verify space after the failed move
c = dfs.getContentSummary(quotaDir20);
assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
c = dfs.getContentSummary(quotaDir21);
assertEquals(c.getSpaceConsumed(), 0);
// Test Append :
// verify space quota
c = dfs.getContentSummary(quotaDir1);
assertEquals(c.getSpaceQuota(), 4 * fileSpace);
// verify space before append;
c = dfs.getContentSummary(dstPath);
assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
OutputStream out = dfs.append(file2);
// appending 1 fileLen should succeed
out.write(new byte[fileLen]);
out.close();
file2Len += fileLen; // after append
// verify space after append;
c = dfs.getContentSummary(dstPath);
assertEquals(c.getSpaceConsumed(), 4 * fileSpace);
// now increase the quota for quotaDir1
dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 5 * fileSpace);
// Now, appending more than 1 fileLen should result in an error
out = dfs.append(file2);
hasException = false;
try {
out.write(new byte[fileLen + 1024]);
out.flush();
out.close();
} catch (DSQuotaExceededException e) {
hasException = true;
IOUtils.closeStream(out);
}
assertTrue(hasException);
file2Len += fileLen; // after partial append
// verify space after partial append
c = dfs.getContentSummary(dstPath);
assertEquals(c.getSpaceConsumed(), 5 * fileSpace);
// Test set replication :
// first reduce the replication
dfs.setReplication(file2, (short)(replication-1));
// verify that space is reduced by file2Len
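// (one replica fewer of file2 frees exactly 1 * file2Len bytes of consumed
// space, which is why the expected value below subtracts file2Len)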
c = dfs.getContentSummary(dstPath);
assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
// now try to increase the replication and expect an error.
hasException = false;
try {
dfs.setReplication(file2, (short)(replication+1));
} catch (DSQuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
// verify space consumed remains unchanged.
c = dfs.getContentSummary(dstPath);
assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
// now increase the quota for quotaDir1 and quotaDir20
dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 10 * fileSpace);
dfs.setQuota(quotaDir20, HdfsConstants.QUOTA_DONT_SET, 10 * fileSpace);
// then increasing replication should be ok.
dfs.setReplication(file2, (short)(replication+1));
// verify increase in space
c = dfs.getContentSummary(dstPath);
assertEquals(c.getSpaceConsumed(), 5 * fileSpace + file2Len);
// Test HDFS-2053 :
// Create directory /hdfs-2053
final Path quotaDir2053 = new Path("/hdfs-2053");
assertTrue(dfs.mkdirs(quotaDir2053));
// Create subdirectories /hdfs-2053/{A,B,C}
final Path quotaDir2053_A = new Path(quotaDir2053, "A");
assertTrue(dfs.mkdirs(quotaDir2053_A));
final Path quotaDir2053_B = new Path(quotaDir2053, "B");
assertTrue(dfs.mkdirs(quotaDir2053_B));
final Path quotaDir2053_C = new Path(quotaDir2053, "C");
assertTrue(dfs.mkdirs(quotaDir2053_C));
// Factors to vary the sizes of test files created in each subdir.
// The actual factors are not really important but they allow us to create
// identifiable file sizes per subdir, which helps during debugging.
int sizeFactorA = 1;
int sizeFactorB = 2;
int sizeFactorC = 4;
// Set space quota for subdirectory C
dfs.setQuota(quotaDir2053_C, HdfsConstants.QUOTA_DONT_SET,
(sizeFactorC + 1) * fileSpace);
c = dfs.getContentSummary(quotaDir2053_C);
assertEquals(c.getSpaceQuota(), (sizeFactorC + 1) * fileSpace);
// Create a file under subdirectory A
DFSTestUtil.createFile(dfs, new Path(quotaDir2053_A, "fileA"),
sizeFactorA * fileLen, replication, 0);
c = dfs.getContentSummary(quotaDir2053_A);
assertEquals(c.getSpaceConsumed(), sizeFactorA * fileSpace);
// Create a file under subdirectory B
DFSTestUtil.createFile(dfs, new Path(quotaDir2053_B, "fileB"),
sizeFactorB * fileLen, replication, 0);
c = dfs.getContentSummary(quotaDir2053_B);
assertEquals(c.getSpaceConsumed(), sizeFactorB * fileSpace);
// Create a file under subdirectory C (which has a space quota)
DFSTestUtil.createFile(dfs, new Path(quotaDir2053_C, "fileC"),
sizeFactorC * fileLen, replication, 0);
c = dfs.getContentSummary(quotaDir2053_C);
assertEquals(c.getSpaceConsumed(), sizeFactorC * fileSpace);
// Check space consumed for /hdfs-2053
c = dfs.getContentSummary(quotaDir2053);
assertEquals(c.getSpaceConsumed(),
(sizeFactorA + sizeFactorB + sizeFactorC) * fileSpace);
assertEquals(20, cluster.getNamesystem().getFSDirectory().getYieldCount());
} finally {
cluster.shutdown();
}
}
private static void checkContentSummary(final ContentSummary expected,
final ContentSummary computed) {
assertEquals(expected.toString(), computed.toString());
}
/**
* Violate a space quota using files of size < 1 block. Test that block
* allocation conservatively assumes that for quota checking the entire
* space of the block is used.
*/
@Test
public void testBlockAllocationAdjustsUsageConservatively()
throws Exception {
Configuration conf = new HdfsConfiguration();
final int BLOCK_SIZE = 6 * 1024;
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
DFSAdmin admin = new DFSAdmin(conf);
final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
final String webhdfsuri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + nnAddr;
System.out.println("webhdfsuri=" + webhdfsuri);
final FileSystem webhdfs = new Path(webhdfsuri).getFileSystem(conf);
try {
Path dir = new Path("/test");
Path file1 = new Path("/test/test1");
Path file2 = new Path("/test/test2");
boolean exceededQuota = false;
final int QUOTA_SIZE = 3 * BLOCK_SIZE; // total space usage including
// repl.
final int FILE_SIZE = BLOCK_SIZE / 2;
ContentSummary c;
// Create the directory and set the quota
assertTrue(fs.mkdirs(dir));
runCommand(admin, false, "-setSpaceQuota", Integer.toString(QUOTA_SIZE),
dir.toString());
// Creating a file should use half the quota
DFSTestUtil.createFile(fs, file1, FILE_SIZE, (short) 3, 1L);
DFSTestUtil.waitReplication(fs, file1, (short) 3);
c = fs.getContentSummary(dir);
checkContentSummary(c, webhdfs.getContentSummary(dir));
assertEquals("Quota is half consumed", QUOTA_SIZE / 2,
c.getSpaceConsumed());
// We cannot create the 2nd file: even though the total space used by the
// two files (2 * 3 * FILE_SIZE) would fit within the quota (3 * BLOCK_SIZE),
// when a block for a file is created the space used is adjusted
// conservatively (3 * block size, i.e. it assumes a full block is written),
// which violates the quota since we've already used half of it for the
// first file.
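// Worked example with the constants above: quota = 3 * 6KB = 18KB, file1
// consumes 3 replicas * 3KB = 9KB, and allocating file2's first block
// conservatively reserves another 3 * 6KB = 18KB, so 9KB + 18KB exceeds
// the 18KB quota.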
try {
DFSTestUtil.createFile(fs, file2, FILE_SIZE, (short) 3, 1L);
} catch (QuotaExceededException e) {
exceededQuota = true;
}
assertTrue("Quota not exceeded", exceededQuota);
} finally {
cluster.shutdown();
}
}
/**
* Like the previous test but create many files. This covers bugs where
* the quota adjustment is incorrect but it takes many files to accrue
* a big enough accounting error to violate the quota.
*/
@Test
public void testMultipleFilesSmallerThanOneBlock() throws Exception {
Configuration conf = new HdfsConfiguration();
final int BLOCK_SIZE = 6 * 1024;
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
// Make it relinquish locks. When run serially, the result should
// be identical.
conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
DFSAdmin admin = new DFSAdmin(conf);
final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
final String webhdfsuri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + nnAddr;
System.out.println("webhdfsuri=" + webhdfsuri);
final FileSystem webhdfs = new Path(webhdfsuri).getFileSystem(conf);
try {
Path dir = new Path("/test");
boolean exceededQuota = false;
ContentSummary c;
// 1kb file
// 6kb block
// 192kb quota
final int FILE_SIZE = 1024;
final int QUOTA_SIZE = 32 * (int) fs.getDefaultBlockSize(dir);
assertEquals(6 * 1024, fs.getDefaultBlockSize(dir));
assertEquals(192 * 1024, QUOTA_SIZE);
// Create the dir and set the quota. We need to enable the quota before
// writing the files as setting the quota afterwards will over-write
// the cached disk space used for quota verification with the actual
// amount used as calculated by INode#spaceConsumedInTree.
assertTrue(fs.mkdirs(dir));
runCommand(admin, false, "-setSpaceQuota", Integer.toString(QUOTA_SIZE),
dir.toString());
// We can create at most 59 files because block allocation is
// conservative and initially assumes a full block is used, so we
// need to leave at least 3 * BLOCK_SIZE free space when allocating
// the last block: (58 * 3 * 1024) + (3 * 6 * 1024) = 192kb
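// Worked example: after 58 files, 58 * 3 * 1KB = 174KB is charged; the
// conservative 3 * 6KB = 18KB reservation for the 59th file's block brings
// the total to exactly 192KB, which still fits. A 60th file would start
// from 59 * 3KB = 177KB, and the same 18KB reservation would exceed the
// 192KB quota.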
for (int i = 0; i < 59; i++) {
Path file = new Path("/test/test"+i);
DFSTestUtil.createFile(fs, file, FILE_SIZE, (short) 3, 1L);
DFSTestUtil.waitReplication(fs, file, (short) 3);
}
// Should account for all 59 files (almost QUOTA_SIZE)
c = fs.getContentSummary(dir);
checkContentSummary(c, webhdfs.getContentSummary(dir));
assertEquals("Invalid space consumed", 59 * FILE_SIZE * 3,
c.getSpaceConsumed());
assertEquals("Invalid space consumed", QUOTA_SIZE - (59 * FILE_SIZE * 3),
3 * (fs.getDefaultBlockSize(dir) - FILE_SIZE));
// Now check that trying to create another file violates the quota
try {
Path file = new Path("/test/test59");
DFSTestUtil.createFile(fs, file, FILE_SIZE, (short) 3, 1L);
DFSTestUtil.waitReplication(fs, file, (short) 3);
} catch (QuotaExceededException e) {
exceededQuota = true;
}
assertTrue("Quota not exceeded", exceededQuota);
assertEquals(2, cluster.getNamesystem().getFSDirectory().getYieldCount());
} finally {
cluster.shutdown();
}
}
@Test
public void testSetSpaceQuotaWhenStorageTypeIsWrong() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
DFSAdmin admin = new DFSAdmin(conf);
ByteArrayOutputStream err = new ByteArrayOutputStream();
System.setErr(new PrintStream(err));
String[] args = { "-setSpaceQuota", "100", "-storageType", "COLD",
"/testDir" };
admin.run(args);
String errOutput = new String(err.toByteArray(), Charsets.UTF_8);
assertTrue(errOutput.contains(StorageType.getTypesSupportingQuota()
.toString()));
}
}
| 37,673 | 38.782471 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.io.IOUtils;
import org.apache.log4j.Level;
import org.junit.Test;
/**
* This class tests that a file need not be closed before its
* data can be read by another client.
*/
public class TestFileCreationClient {
static final String DIR = "/" + TestFileCreationClient.class.getSimpleName() + "/";
{
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
}
/** Test lease recovery Triggered by DFSClient. */
@Test
public void testClientTriggeredLeaseRecovery() throws Exception {
final int REPLICATION = 3;
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, REPLICATION);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
try {
final FileSystem fs = cluster.getFileSystem();
final Path dir = new Path("/wrwelkj");
SlowWriter[] slowwriters = new SlowWriter[10];
for(int i = 0; i < slowwriters.length; i++) {
slowwriters[i] = new SlowWriter(fs, new Path(dir, "file" + i));
}
try {
for(int i = 0; i < slowwriters.length; i++) {
slowwriters[i].start();
}
Thread.sleep(1000); // let writers get started
// stop a datanode; this should trigger lease recovery.
cluster.stopDataNode(AppendTestUtil.nextInt(REPLICATION));
// let the slow writers write for a few more seconds
System.out.println("Wait a few seconds");
Thread.sleep(5000);
}
finally {
for(int i = 0; i < slowwriters.length; i++) {
if (slowwriters[i] != null) {
slowwriters[i].running = false;
slowwriters[i].interrupt();
}
}
for(int i = 0; i < slowwriters.length; i++) {
if (slowwriters[i] != null) {
slowwriters[i].join();
}
}
}
//Verify the file
System.out.println("Verify the file");
for(int i = 0; i < slowwriters.length; i++) {
System.out.println(slowwriters[i].filepath + ": length="
+ fs.getFileStatus(slowwriters[i].filepath).getLen());
FSDataInputStream in = null;
try {
in = fs.open(slowwriters[i].filepath);
for(int j = 0, x; (x = in.read()) != -1; j++) {
assertEquals(j, x);
}
}
finally {
IOUtils.closeStream(in);
}
}
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
static class SlowWriter extends Thread {
final FileSystem fs;
final Path filepath;
boolean running = true;
SlowWriter(FileSystem fs, Path filepath) {
super(SlowWriter.class.getSimpleName() + ":" + filepath);
this.fs = fs;
this.filepath = filepath;
}
@Override
public void run() {
FSDataOutputStream out = null;
int i = 0;
try {
out = fs.create(filepath);
for(; running; i++) {
System.out.println(getName() + " writes " + i);
out.write(i);
out.hflush();
sleep(100);
}
}
catch(Exception e) {
System.out.println(getName() + " dies: e=" + e);
}
finally {
System.out.println(getName() + ": i=" + i);
IOUtils.closeStream(out);
}
}
}
}
| 5,104 | 32.807947 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.test.PathUtils;
import org.junit.Before;
import org.junit.Test;
/**
* Tests MiniDFS cluster setup/teardown and isolation.
* Every instance is brought up with a new data dir, to ensure that
* shutdown work in background threads doesn't interfere with bringing up
* the new cluster.
*/
public class TestMiniDFSCluster {
private static final String CLUSTER_1 = "cluster1";
private static final String CLUSTER_2 = "cluster2";
private static final String CLUSTER_3 = "cluster3";
private static final String CLUSTER_4 = "cluster4";
private static final String CLUSTER_5 = "cluster5";
protected File testDataPath;
@Before
public void setUp() {
testDataPath = new File(PathUtils.getTestDir(getClass()), "miniclusters");
}
/**
* Verify that without system properties the cluster still comes up, provided
* the configuration is set
*
* @throws Throwable on a failure
*/
@Test(timeout=100000)
public void testClusterWithoutSystemProperties() throws Throwable {
System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
Configuration conf = new HdfsConfiguration();
File testDataCluster1 = new File(testDataPath, CLUSTER_1);
String c1Path = testDataCluster1.getAbsolutePath();
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c1Path);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
assertEquals(new File(c1Path + "/data"),
new File(cluster.getDataDirectory()));
} finally {
cluster.shutdown();
}
}
/**
* Bring up two clusters and assert that they are in different directories.
* @throws Throwable on a failure
*/
@Test(timeout=100000)
public void testDualClusters() throws Throwable {
File testDataCluster2 = new File(testDataPath, CLUSTER_2);
File testDataCluster3 = new File(testDataPath, CLUSTER_3);
Configuration conf = new HdfsConfiguration();
String c2Path = testDataCluster2.getAbsolutePath();
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c2Path);
MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf).build();
MiniDFSCluster cluster3 = null;
try {
String dataDir2 = cluster2.getDataDirectory();
assertEquals(new File(c2Path + "/data"), new File(dataDir2));
//change the data dir
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
testDataCluster3.getAbsolutePath());
MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
cluster3 = builder.build();
String dataDir3 = cluster3.getDataDirectory();
assertTrue("Clusters are bound to the same directory: " + dataDir2,
!dataDir2.equals(dataDir3));
} finally {
MiniDFSCluster.shutdownCluster(cluster3);
MiniDFSCluster.shutdownCluster(cluster2);
}
}
@Test(timeout=100000)
public void testIsClusterUpAfterShutdown() throws Throwable {
Configuration conf = new HdfsConfiguration();
File testDataCluster4 = new File(testDataPath, CLUSTER_4);
String c4Path = testDataCluster4.getAbsolutePath();
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c4Path);
MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build();
try {
DistributedFileSystem dfs = cluster4.getFileSystem();
dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
cluster4.shutdown();
} finally {
while(cluster4.isClusterUp()){
Thread.sleep(1000);
}
}
}
/** MiniDFSCluster should not clobber dfs.datanode.hostname if requested */
@Test(timeout=100000)
public void testClusterSetDatanodeHostname() throws Throwable {
assumeTrue(System.getProperty("os.name").startsWith("Linux"));
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "MYHOST");
File testDataCluster5 = new File(testDataPath, CLUSTER_5);
String c5Path = testDataCluster5.getAbsolutePath();
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c5Path);
MiniDFSCluster cluster5 = new MiniDFSCluster.Builder(conf)
.numDataNodes(1)
.checkDataNodeHostConfig(true)
.build();
try {
assertEquals("DataNode hostname config not respected", "MYHOST",
cluster5.getDataNodes().get(0).getDatanodeId().getHostName());
} finally {
MiniDFSCluster.shutdownCluster(cluster5);
}
}
@Test
public void testClusterSetDatanodeDifferentStorageType() throws IOException {
final Configuration conf = new HdfsConfiguration();
StorageType[][] storageType = new StorageType[][] {
{StorageType.DISK, StorageType.ARCHIVE}, {StorageType.DISK},
{StorageType.ARCHIVE}};
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3).storageTypes(storageType).build();
try {
cluster.waitActive();
ArrayList<DataNode> dataNodes = cluster.getDataNodes();
// Check the number of storage directories in each DN
for (int i = 0; i < storageType.length; i++) {
assertEquals(DataNode.getStorageLocations(dataNodes.get(i).getConf())
.size(), storageType[i].length);
}
} finally {
MiniDFSCluster.shutdownCluster(cluster);
}
}
@Test
public void testClusterNoStorageTypeSetForDatanodes() throws IOException {
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3).build();
try {
cluster.waitActive();
ArrayList<DataNode> dataNodes = cluster.getDataNodes();
// Check the number of storage directories in each DN
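// (MiniDFSCluster provisions two data directories per datanode by default,
// hence the expected count of 2)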
for (DataNode datanode : dataNodes) {
assertEquals(DataNode.getStorageLocations(datanode.getConf()).size(),
2);
}
} finally {
MiniDFSCluster.shutdownCluster(cluster);
}
}
}
| 7,063 | 36.978495 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
public class TestDeprecatedKeys {
//Tests a deprecated key
@Test
public void testDeprecatedKeys() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.set("topology.script.file.name", "xyz");
String scriptFile = conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
assertTrue(scriptFile.equals("xyz")) ;
conf.setInt("dfs.replication.interval", 1);
String alpha = DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY;
int repInterval = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 3) ;
assertTrue(repInterval == 1) ;
}
}
| 1,558 | 37.02439 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.junit.Assert;
import org.junit.Test;
/**
* This class tests the building blocks that are needed to
* support HDFS appends.
*/
public class TestFileAppend{
final boolean simulatedStorage = false;
private static byte[] fileContents = null;
//
// writes to file but does not close it
//
private void writeFile(FSDataOutputStream stm) throws IOException {
byte[] buffer = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
stm.write(buffer);
}
//
// verify that the data written to the full blocks are sane
//
private void checkFile(DistributedFileSystem fileSys, Path name, int repl)
throws IOException {
boolean done = false;
// wait till all full blocks are confirmed by the datanodes.
while (!done) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {;}
done = true;
BlockLocation[] locations = fileSys.getFileBlockLocations(
fileSys.getFileStatus(name), 0, AppendTestUtil.FILE_SIZE);
if (locations.length < AppendTestUtil.NUM_BLOCKS) {
System.out.println("Number of blocks found " + locations.length);
done = false;
continue;
}
for (int idx = 0; idx < AppendTestUtil.NUM_BLOCKS; idx++) {
if (locations[idx].getHosts().length < repl) {
System.out.println("Block index " + idx + " not yet replciated.");
done = false;
break;
}
}
}
byte[] expected =
new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
if (simulatedStorage) {
LocatedBlocks lbs = fileSys.getClient().getLocatedBlocks(name.toString(),
0, AppendTestUtil.FILE_SIZE);
DFSTestUtil.fillExpectedBuf(lbs, expected);
} else {
System.arraycopy(fileContents, 0, expected, 0, expected.length);
}
// do a sanity check. Read the file
// do not check file status since the file is not yet closed.
AppendTestUtil.checkFullFile(fileSys, name,
AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE,
expected, "Read 1", false);
}
/**
* Test that copy on write for blocks works correctly
* @throws IOException an exception might be thrown
*/
@Test
public void testCopyOnWrite() throws IOException {
Configuration conf = new HdfsConfiguration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
InetSocketAddress addr = new InetSocketAddress("localhost",
cluster.getNameNodePort());
DFSClient client = new DFSClient(addr, conf);
try {
// create a new file, write to it and close it.
//
Path file1 = new Path("/filestatus.dat");
FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
writeFile(stm);
stm.close();
// Get a handle to the datanode
DataNode[] dn = cluster.listDataNodes();
assertTrue("There should be only one datanode but found " + dn.length,
dn.length == 1);
LocatedBlocks locations = client.getNamenode().getBlockLocations(
file1.toString(), 0, Long.MAX_VALUE);
List<LocatedBlock> blocks = locations.getLocatedBlocks();
//
// Create hard links for a few of the blocks
//
for (int i = 0; i < blocks.size(); i = i + 2) {
ExtendedBlock b = blocks.get(i).getBlock();
final File f = DataNodeTestUtils.getFile(dn[0],
b.getBlockPoolId(), b.getLocalBlock().getBlockId());
File link = new File(f.toString() + ".link");
System.out.println("Creating hardlink for File " + f + " to " + link);
HardLink.createHardLink(f, link);
}
//
// Detach all blocks. This should remove hardlinks (if any)
//
for (int i = 0; i < blocks.size(); i++) {
ExtendedBlock b = blocks.get(i).getBlock();
System.out.println("testCopyOnWrite detaching block " + b);
assertTrue("Detaching block " + b + " should have returned true",
DataNodeTestUtils.unlinkBlock(dn[0], b, 1));
}
// Since the blocks were already detached earlier, these calls should
// return false
//
for (int i = 0; i < blocks.size(); i++) {
ExtendedBlock b = blocks.get(i).getBlock();
System.out.println("testCopyOnWrite detaching block " + b);
assertTrue("Detaching block " + b + " should have returned false",
!DataNodeTestUtils.unlinkBlock(dn[0], b, 1));
}
} finally {
client.close();
fs.close();
cluster.shutdown();
}
}
/**
* Test a simple flush on a simple HDFS file.
* @throws IOException an exception might be thrown
*/
@Test
public void testSimpleFlush() throws IOException {
Configuration conf = new HdfsConfiguration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
DistributedFileSystem fs = cluster.getFileSystem();
try {
// create a new file.
Path file1 = new Path("/simpleFlush.dat");
FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
System.out.println("Created file simpleFlush.dat");
// write to file
int mid = AppendTestUtil.FILE_SIZE /2;
stm.write(fileContents, 0, mid);
stm.hflush();
System.out.println("Wrote and Flushed first part of file.");
// write the remainder of the file
stm.write(fileContents, mid, AppendTestUtil.FILE_SIZE - mid);
System.out.println("Written second part of file");
stm.hflush();
stm.hflush();
System.out.println("Wrote and Flushed second part of file.");
// verify that full blocks are sane
checkFile(fs, file1, 1);
stm.close();
System.out.println("Closed file.");
// verify that entire file is good
AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
fileContents, "Read 2");
} catch (IOException e) {
System.out.println("Exception :" + e);
throw e;
} catch (Throwable e) {
System.out.println("Throwable :" + e);
e.printStackTrace();
throw new IOException("Throwable : " + e);
} finally {
fs.close();
cluster.shutdown();
}
}
/**
* Test that file data can be flushed.
* @throws IOException an exception might be thrown
*/
@Test
public void testComplexFlush() throws IOException {
Configuration conf = new HdfsConfiguration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
DistributedFileSystem fs = cluster.getFileSystem();
try {
// create a new file.
Path file1 = new Path("/complexFlush.dat");
FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
System.out.println("Created file complexFlush.dat");
int start = 0;
for (start = 0; (start + 29) < AppendTestUtil.FILE_SIZE; ) {
stm.write(fileContents, start, 29);
stm.hflush();
start += 29;
}
stm.write(fileContents, start, AppendTestUtil.FILE_SIZE -start);
// need to make sure we completely write out all full blocks before
// the checkFile() call (see FSOutputSummer#flush)
stm.flush();
// verify that full blocks are sane
checkFile(fs, file1, 1);
stm.close();
// verify that entire file is good
AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
fileContents, "Read 2");
} catch (IOException e) {
System.out.println("Exception :" + e);
throw e;
} catch (Throwable e) {
System.out.println("Throwable :" + e);
e.printStackTrace();
throw new IOException("Throwable : " + e);
} finally {
fs.close();
cluster.shutdown();
}
}
/**
* FileNotFoundException is expected for appending to a non-existing file
*
* @throws FileNotFoundException as the result
*/
@Test(expected = FileNotFoundException.class)
public void testFileNotFound() throws IOException {
Configuration conf = new HdfsConfiguration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
try {
Path file1 = new Path("/nonexistingfile.dat");
fs.append(file1);
} finally {
fs.close();
cluster.shutdown();
}
}
/** Test two consecutive appends on a file with a full block. */
@Test
public void testAppendTwice() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
final FileSystem fs1 = cluster.getFileSystem();
final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
try {
final Path p = new Path("/testAppendTwice/foo");
final int len = 1 << 16;
final byte[] fileContents = AppendTestUtil.initBuffer(len);
{
// create a new file with a full block.
FSDataOutputStream out = fs2.create(p, true, 4096, (short)1, len);
out.write(fileContents, 0, len);
out.close();
}
//1st append does not add any data so that the last block remains full
//and the last block in INodeFileUnderConstruction is a BlockInfo
//but not BlockInfoUnderConstruction.
fs2.append(p);
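// fs2 now holds the lease on p, since its append stream is left open.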
//2nd append should get AlreadyBeingCreatedException
fs1.append(p);
Assert.fail();
} catch(RemoteException re) {
AppendTestUtil.LOG.info("Got an exception:", re);
Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),
re.getClassName());
} finally {
fs2.close();
fs1.close();
cluster.shutdown();
}
}
/** Test two consecutive appends on a file with a full block. */
@Test
public void testAppend2Twice() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
final DistributedFileSystem fs1 = cluster.getFileSystem();
final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
try {
final Path p = new Path("/testAppendTwice/foo");
final int len = 1 << 16;
final byte[] fileContents = AppendTestUtil.initBuffer(len);
{
// create a new file with a full block.
FSDataOutputStream out = fs2.create(p, true, 4096, (short)1, len);
out.write(fileContents, 0, len);
out.close();
}
//1st append does not add any data so that the last block remains full
//and the last block in INodeFileUnderConstruction is a BlockInfo
//but not BlockInfoUnderConstruction.
((DistributedFileSystem) fs2).append(p,
EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
// 2nd append should get AlreadyBeingCreatedException
fs1.append(p);
Assert.fail();
} catch(RemoteException re) {
AppendTestUtil.LOG.info("Got an exception:", re);
Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),
re.getClassName());
} finally {
fs2.close();
fs1.close();
cluster.shutdown();
}
}
/** Tests appending after soft-limit expires. */
@Test
public void testAppendAfterSoftLimit()
throws IOException, InterruptedException {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
//Set small soft-limit for lease
final long softLimit = 1L;
final long hardLimit = 9999999L;
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.build();
cluster.setLeasePeriod(softLimit, hardLimit);
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
FileSystem fs2 = new DistributedFileSystem();
fs2.initialize(fs.getUri(), conf);
final Path testPath = new Path("/testAppendAfterSoftLimit");
final byte[] fileContents = AppendTestUtil.initBuffer(32);
// create a new file without closing
FSDataOutputStream out = fs.create(testPath);
out.write(fileContents);
//Wait for > soft-limit
Thread.sleep(250);
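// The 1 ms soft limit has expired by now, so fs2 is allowed to preempt the
// original writer's lease when it appends below.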
try {
FSDataOutputStream appendStream2 = fs2.append(testPath);
appendStream2.write(fileContents);
appendStream2.close();
assertEquals(fileContents.length, fs.getFileStatus(testPath).getLen());
} finally {
fs.close();
fs2.close();
cluster.shutdown();
}
}
/** Tests appending after soft-limit expires. */
@Test
public void testAppend2AfterSoftLimit() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
//Set small soft-limit for lease
final long softLimit = 1L;
final long hardLimit = 9999999L;
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.build();
cluster.setLeasePeriod(softLimit, hardLimit);
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
DistributedFileSystem fs2 = new DistributedFileSystem();
fs2.initialize(fs.getUri(), conf);
final Path testPath = new Path("/testAppendAfterSoftLimit");
final byte[] fileContents = AppendTestUtil.initBuffer(32);
// create a new file without closing
FSDataOutputStream out = fs.create(testPath);
out.write(fileContents);
//Wait for > soft-limit
Thread.sleep(250);
try {
FSDataOutputStream appendStream2 = fs2.append(testPath,
EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
appendStream2.write(fileContents);
appendStream2.close();
assertEquals(fileContents.length, fs.getFileStatus(testPath).getLen());
// make sure we now have 1 block since the first writer was revoked
LocatedBlocks blks = fs.getClient().getLocatedBlocks(testPath.toString(),
0L);
assertEquals(1, blks.getLocatedBlocks().size());
for (LocatedBlock blk : blks.getLocatedBlocks()) {
assertEquals(fileContents.length, blk.getBlockSize());
}
} finally {
fs.close();
fs2.close();
cluster.shutdown();
}
}
/**
* Old replica of the block should not be accepted as valid for append/read
*/
@Test
public void testFailedAppendBlockRejection() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.set("dfs.client.block.write.replace-datanode-on-failure.enable",
"false");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
.build();
DistributedFileSystem fs = null;
try {
fs = cluster.getFileSystem();
Path path = new Path("/test");
FSDataOutputStream out = fs.create(path);
out.writeBytes("hello\n");
out.close();
// stop one datanode
DataNodeProperties dnProp = cluster.stopDataNode(0);
String dnAddress = dnProp.datanode.getXferAddress().toString();
if (dnAddress.startsWith("/")) {
dnAddress = dnAddress.substring(1);
}
// append again to bump genstamps
for (int i = 0; i < 2; i++) {
out = fs.append(path);
out.writeBytes("helloagain\n");
out.close();
}
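// The stopped datanode still holds a replica with the original generation
// stamp; the appends above bumped the genstamp on the live replicas, so
// the old replica is now stale.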
// re-open and make the block state as underconstruction
out = fs.append(path);
cluster.restartDataNode(dnProp, true);
// wait till the block report comes
Thread.sleep(2000);
// check the block locations, this should not contain restarted datanode
BlockLocation[] locations = fs.getFileBlockLocations(path, 0,
Long.MAX_VALUE);
String[] names = locations[0].getNames();
for (String node : names) {
if (node.equals(dnAddress)) {
fail("Failed append should not be present in latest block locations.");
}
}
out.close();
} finally {
IOUtils.closeStream(fs);
cluster.shutdown();
}
}
/**
* Test multiple appends with CreateFlag.NEW_BLOCK: each append should start a
* new block, and an old replica of the block should not be accepted as valid
* for append/read.
*/
@Test
public void testMultiAppend2() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.set("dfs.client.block.write.replace-datanode-on-failure.enable",
"false");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
.build();
DistributedFileSystem fs = null;
final String hello = "hello\n";
try {
fs = cluster.getFileSystem();
Path path = new Path("/test");
FSDataOutputStream out = fs.create(path);
out.writeBytes(hello);
out.close();
// stop one datanode
DataNodeProperties dnProp = cluster.stopDataNode(0);
String dnAddress = dnProp.datanode.getXferAddress().toString();
if (dnAddress.startsWith("/")) {
dnAddress = dnAddress.substring(1);
}
// append again to bump genstamps
for (int i = 0; i < 2; i++) {
out = fs.append(path,
EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
out.writeBytes(hello);
out.close();
}
// re-open and make the block state as underconstruction
out = fs.append(path, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK),
4096, null);
cluster.restartDataNode(dnProp, true);
// wait till the block report comes
Thread.sleep(2000);
out.writeBytes(hello);
out.close();
// check the block locations
LocatedBlocks blocks = fs.getClient().getLocatedBlocks(path.toString(), 0L);
// since we appended the file 3 times, there should be 4 blocks
assertEquals(4, blocks.getLocatedBlocks().size());
for (LocatedBlock block : blocks.getLocatedBlocks()) {
assertEquals(hello.length(), block.getBlockSize());
}
StringBuilder sb = new StringBuilder();
for (int i = 0; i < 4; i++) {
sb.append(hello);
}
final byte[] content = sb.toString().getBytes();
AppendTestUtil.checkFullFile(fs, path, content.length, content,
"Read /test");
// restart namenode to make sure the editlog can be properly applied
cluster.restartNameNode(true);
cluster.waitActive();
AppendTestUtil.checkFullFile(fs, path, content.length, content,
"Read /test");
blocks = fs.getClient().getLocatedBlocks(path.toString(), 0L);
// since we appended the file 3 times, there should be 4 blocks
assertEquals(4, blocks.getLocatedBlocks().size());
for (LocatedBlock block : blocks.getLocatedBlocks()) {
assertEquals(hello.length(), block.getBlockSize());
}
} finally {
IOUtils.closeStream(fs);
cluster.shutdown();
}
}
@Test(timeout = 10000)
public void testAppendCorruptedBlock() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
conf.setInt("dfs.min.replication", 1);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.build();
try {
DistributedFileSystem fs = cluster.getFileSystem();
Path fileName = new Path("/appendCorruptBlock");
DFSTestUtil.createFile(fs, fileName, 512, (short) 1, 0);
DFSTestUtil.waitReplication(fs, fileName, (short) 1);
Assert.assertTrue("File not created", fs.exists(fileName));
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
cluster.corruptBlockOnDataNodes(block);
DFSTestUtil.appendFile(fs, fileName, "appendCorruptBlock");
} finally {
cluster.shutdown();
}
}
}
| 22,109 | 34.151033 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
/**
* This class tests the DFS class via the FileSystem interface in a single node
* mini-cluster.
*/
public class TestLocalDFS {
private void writeFile(FileSystem fileSys, Path name) throws IOException {
DataOutputStream stm = fileSys.create(name);
stm.writeBytes("oom");
stm.close();
}
private void readFile(FileSystem fileSys, Path name) throws IOException {
DataInputStream stm = fileSys.open(name);
byte[] buffer = new byte[4];
int bytesRead = stm.read(buffer, 0 , 4);
assertEquals("oom", new String(buffer, 0 , bytesRead));
stm.close();
}
private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
assertTrue(fileSys.exists(name));
fileSys.delete(name, true);
assertTrue(!fileSys.exists(name));
}
static String getUserName(FileSystem fs) {
if (fs instanceof DistributedFileSystem) {
return ((DistributedFileSystem)fs).dfs.ugi.getShortUserName();
}
return System.getProperty("user.name");
}
/**
* Tests get/set working directory in DFS.
*/
@Test
public void testWorkingDirectory() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fileSys = cluster.getFileSystem();
try {
Path orig_path = fileSys.getWorkingDirectory();
assertTrue(orig_path.isAbsolute());
Path file1 = new Path("somewhat/random.txt");
writeFile(fileSys, file1);
assertTrue(fileSys.exists(new Path(orig_path, file1.toString())));
fileSys.delete(file1, true);
Path subdir1 = new Path("/somewhere");
fileSys.setWorkingDirectory(subdir1);
writeFile(fileSys, file1);
cleanupFile(fileSys, new Path(subdir1, file1.toString()));
Path subdir2 = new Path("else");
fileSys.setWorkingDirectory(subdir2);
writeFile(fileSys, file1);
readFile(fileSys, file1);
cleanupFile(fileSys, new Path(new Path(subdir1, subdir2.toString()),
file1.toString()));
// test home directory
Path home =
fileSys.makeQualified(
new Path(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT
+ "/" + getUserName(fileSys)));
Path fsHome = fileSys.getHomeDirectory();
assertEquals(home, fsHome);
} finally {
fileSys.close();
cluster.shutdown();
}
}
/**
* Tests get/set working directory in DFS.
*/
@Test(timeout=30000)
public void testHomeDirectory() throws IOException {
final String[] homeBases = new String[] {"/home", "/home/user"};
Configuration conf = new HdfsConfiguration();
for (final String homeBase : homeBases) {
conf.set(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY, homeBase);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fileSys = cluster.getFileSystem();
try {
// test home directory
Path home =
fileSys.makeQualified(
new Path(homeBase + "/" + getUserName(fileSys)));
Path fsHome = fileSys.getHomeDirectory();
assertEquals(home, fsHome);
} finally {
fileSys.close();
cluster.shutdown();
}
}
}
}
| 4,429 | 33.076923 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public class TestParallelRead extends TestParallelReadUtil {
@BeforeClass
static public void setupCluster() throws Exception {
// This is a test of the normal (TCP) read path. For this reason, we turn
// off both short-circuit local reads and UNIX domain socket data traffic.
HdfsConfiguration conf = new HdfsConfiguration();
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
false);
// dfs.domain.socket.path should be ignored because the previous two keys
// were set to false. This is a regression test for HDFS-4473.
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, "/will/not/be/created");
setupCluster(DEFAULT_REPLICATION_FACTOR, conf);
}
@AfterClass
static public void teardownCluster() throws Exception {
TestParallelReadUtil.teardownCluster();
}
}
| 1,881 | 40.822222 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteBlockGetsBlockLengthHint.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.*;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.*;
/**
* Test to verify that the DFSClient passes the expected block length to
* the DataNode via DataTransferProtocol.
*/
public class TestWriteBlockGetsBlockLengthHint {
static final long DEFAULT_BLOCK_LENGTH = 1024;
static final long EXPECTED_BLOCK_LENGTH = DEFAULT_BLOCK_LENGTH * 2;
@Test
public void blockLengthHintIsPropagated() throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
final Path path = new Path("/" + METHOD_NAME + ".dat");
Configuration conf = new HdfsConfiguration();
FsDatasetChecker.setFactory(conf);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_LENGTH);
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
cluster.waitActive();
// FsDatasetChecker#createRbw asserts during block creation if the test
// fails.
DFSTestUtil.createFile(
cluster.getFileSystem(),
path,
4096, // Buffer size.
EXPECTED_BLOCK_LENGTH,
EXPECTED_BLOCK_LENGTH,
(short) 1,
0x1BAD5EED);
} finally {
cluster.shutdown();
}
}
static class FsDatasetChecker extends SimulatedFSDataset {
static class Factory extends FsDatasetSpi.Factory<SimulatedFSDataset> {
@Override
public SimulatedFSDataset newInstance(DataNode datanode,
DataStorage storage, Configuration conf) throws IOException {
return new FsDatasetChecker(storage, conf);
}
@Override
public boolean isSimulated() {
return true;
}
}
public static void setFactory(Configuration conf) {
conf.set(DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
Factory.class.getName());
}
public FsDatasetChecker(DataStorage storage, Configuration conf) {
super(storage, conf);
}
/**
* Override createRbw to verify that the block length that is passed
* is correct. This requires both DFSOutputStream and BlockReceiver to
* correctly propagate the hint to FsDatasetSpi.
*/
@Override
public synchronized ReplicaHandler createRbw(
StorageType storageType, ExtendedBlock b, boolean allowLazyPersist)
throws IOException {
assertThat(b.getLocalBlock().getNumBytes(), is(EXPECTED_BLOCK_LENGTH));
return super.createRbw(storageType, b, allowLazyPersist);
}
}
}
| 3,785 | 33.733945 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.junit.Test;
/**
* This class tests that blocks can be larger than 2GB
*/
public class TestLargeBlock {
/**
{
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)TestLargeBlock.LOG).getLogger().setLevel(Level.ALL);
}
*/
private static final Log LOG = LogFactory.getLog(TestLargeBlock.class);
// should we verify the data read back from the file? (slow)
static final boolean verifyData = true;
static final byte[] pattern = { 'D', 'E', 'A', 'D', 'B', 'E', 'E', 'F'};
static final boolean simulatedStorage = false;
// creates a file
static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl,
final long blockSize)
throws IOException {
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) repl, blockSize);
LOG.info("createFile: Created " + name + " with " + repl + " replica.");
return stm;
}
/**
* Writes pattern to file
* @param stm FSDataOutputStream to write the file
* @param fileSize size of the file to be written
* @throws IOException in case of errors
*/
static void writeFile(FSDataOutputStream stm, final long fileSize)
throws IOException {
// write in chunks of 64 MB
final int writeSize = pattern.length * 8 * 1024 * 1024;
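// (pattern.length is 8, so writeSize works out to 8 * 8MB = 64MB)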
if (writeSize > Integer.MAX_VALUE) {
throw new IOException("A single write is too large " + writeSize);
}
long bytesToWrite = fileSize;
byte[] b = new byte[writeSize];
// initialize buffer
for (int j = 0; j < writeSize; j++) {
b[j] = pattern[j % pattern.length];
}
while (bytesToWrite > 0) {
// how many bytes we are writing in this iteration
int thiswrite = (int) Math.min(writeSize, bytesToWrite);
stm.write(b, 0, thiswrite);
bytesToWrite -= thiswrite;
}
}
/**
* Reads from file and makes sure that it matches the pattern
* @param fs a reference to FileSystem
* @param name Path of a file
* @param fileSize size of the file
* @throws IOException in case of errors
*/
static void checkFullFile(FileSystem fs, Path name, final long fileSize)
throws IOException {
// read in chunks of 128 MB
final int readSize = pattern.length * 16 * 1024 * 1024;
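// (pattern.length is 8, so readSize works out to 8 * 16MB = 128MB)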
if (readSize > Integer.MAX_VALUE) {
throw new IOException("A single read is too large " + readSize);
}
byte[] b = new byte[readSize];
long bytesToRead = fileSize;
byte[] compb = new byte[readSize]; // buffer with correct data for comparison
if (verifyData) {
// initialize compare buffer
for (int j = 0; j < readSize; j++) {
compb[j] = pattern[j % pattern.length];
}
}
FSDataInputStream stm = fs.open(name);
while (bytesToRead > 0) {
// how many bytes we are reading in this iteration
int thisread = (int) Math.min(readSize, bytesToRead);
stm.readFully(b, 0, thisread);
if (verifyData) {
// verify data read
if (thisread == readSize) {
assertTrue("file is corrupted at or after byte " +
(fileSize - bytesToRead), Arrays.equals(b, compb));
} else {
// b was only partially filled by last read
for (int k = 0; k < thisread; k++) {
assertTrue("file is corrupted at or after byte " +
(fileSize - bytesToRead), b[k] == compb[k]);
}
}
}
LOG.debug("Before update: to read: " + bytesToRead +
"; read already: "+ thisread);
bytesToRead -= thisread;
LOG.debug("After update: to read: " + bytesToRead +
"; read already: " + thisread);
}
stm.close();
}
/**
* Test for block size of 2GB + 512B. This test can take a rather long time to
* complete on Windows (reading the file back can be slow) so we use a larger
* timeout here.
* @throws IOException in case of errors
*/
@Test (timeout = 900000)
public void testLargeBlockSize() throws IOException {
final long blockSize = 2L * 1024L * 1024L * 1024L + 512L; // 2GB + 512B
runTest(blockSize);
}
/**
* Test that we can write to and read from large blocks
* @param blockSize size of the block
* @throws IOException in case of errors
*/
public void runTest(final long blockSize) throws IOException {
// write a file that is slightly larger than 1 block
final long fileSize = blockSize + 1L;
Configuration conf = new Configuration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
try {
// create a new file in test data directory
Path file1 = new Path("/tmp/TestLargeBlock", blockSize + ".dat");
FSDataOutputStream stm = createFile(fs, file1, 1, blockSize);
LOG.info("File " + file1 + " created with file size " +
fileSize +
" blocksize " + blockSize);
// verify that file exists in FS namespace
assertTrue(file1 + " should be a file",
fs.getFileStatus(file1).isFile());
// write to file
writeFile(stm, fileSize);
LOG.info("File " + file1 + " written to.");
// close file
stm.close();
LOG.info("File " + file1 + " closed.");
// Make sure a client can read it
checkFullFile(fs, file1, fileSize);
// verify that file size has changed
long len = fs.getFileStatus(file1).getLen();
assertTrue(file1 + " should be of size " + fileSize +
" but found to be of size " + len,
len == fileSize);
} finally {
cluster.shutdown();
}
}
}
| 7,388 | 32.586364 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.util.Time;
import org.junit.Test;
/**
* This class tests the replication and injection of blocks of a DFS file for simulated storage.
*/
public class TestInjectionForSimulatedStorage {
private final int checksumSize = 16;
private final int blockSize = checksumSize*2;
private final int numBlocks = 4;
private final int filesize = blockSize*numBlocks;
private final int numDataNodes = 4;
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestInjectionForSimulatedStorage");
private void writeFile(FileSystem fileSys, Path name, int repl)
throws IOException {
    // create and write a file that contains numBlocks blocks of data
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) repl, blockSize);
byte[] buffer = new byte[filesize];
for (int i=0; i<buffer.length; i++) {
buffer[i] = '1';
}
stm.write(buffer);
stm.close();
}
  // Waits for all of the blocks to have expected replication
private void waitForBlockReplication(String filename,
ClientProtocol namenode,
int expected, long maxWaitSec)
throws IOException {
long start = Time.monotonicNow();
//wait for all the blocks to be replicated;
LOG.info("Checking for block replication for " + filename);
LocatedBlocks blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
assertEquals(numBlocks, blocks.locatedBlockCount());
for (int i = 0; i < numBlocks; ++i) {
LOG.info("Checking for block:" + (i+1));
      while (true) { // Loop to check for block i (usually when block 0 is done, all blocks are done)
blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
assertEquals(numBlocks, blocks.locatedBlockCount());
LocatedBlock block = blocks.get(i);
int actual = block.getLocations().length;
if ( actual == expected ) {
LOG.info("Got enough replicas for " + (i+1) + "th block " + block.getBlock() +
", got " + actual + ".");
break;
}
LOG.info("Not enough replicas for " + (i+1) + "th block " + block.getBlock() +
" yet. Expecting " + expected + ", got " +
actual + ".");
if (maxWaitSec > 0 &&
(Time.monotonicNow() - start) > (maxWaitSec * 1000)) {
throw new IOException("Timedout while waiting for all blocks to " +
" be replicated for " + filename);
}
try {
Thread.sleep(500);
} catch (InterruptedException ignored) {}
}
}
}
  /* This test makes sure that NameNode retries all the available blocks
   * for under replicated blocks. This test uses simulated storage and one
   * of its features to inject blocks.
   *
   * It creates a file with several blocks and replication of 4.
   * The cluster is then shut down - NN retains its state but the DNs are
   * all simulated and hence lose their blocks.
   * The blocks are then injected in one of the DNs. The expected behaviour is
   * that the NN will arrange for the missing replicas to be copied from a
   * valid source.
   */
@Test
public void testInjection() throws IOException {
MiniDFSCluster cluster = null;
String testFile = "/replication-test-file";
Path testPath = new Path(testFile);
byte buffer[] = new byte[1024];
for (int i=0; i<buffer.length; i++) {
buffer[i] = '1';
}
try {
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(numDataNodes));
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, checksumSize);
SimulatedFSDataset.setFactory(conf);
//first time format
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
String bpid = cluster.getNamesystem().getBlockPoolId();
DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()),
conf);
writeFile(cluster.getFileSystem(), testPath, numDataNodes);
waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, 20);
List<Map<DatanodeStorage, BlockListAsLongs>> blocksList = cluster.getAllBlockReports(bpid);
cluster.shutdown();
cluster = null;
      /* Start the MiniDFSCluster with more datanodes since once a writeBlock
       * to a datanode fails, the same block can not be written to it
       * immediately. In our case some replication attempts will fail.
       */
LOG.info("Restarting minicluster");
conf = new HdfsConfiguration();
SimulatedFSDataset.setFactory(conf);
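      // A threshold of 0 lets the restarted NameNode leave safemode without
      // waiting for any block reports (all simulated replicas were lost).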
conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDataNodes * 2)
.format(false)
.build();
cluster.waitActive();
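      // Flatten the per-storage block reports captured before the shutdown
      // into a single set of unique blocks to re-inject.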
Set<Block> uniqueBlocks = new HashSet<Block>();
for(Map<DatanodeStorage, BlockListAsLongs> map : blocksList) {
for(BlockListAsLongs blockList : map.values()) {
for(Block b : blockList) {
uniqueBlocks.add(new Block(b));
}
}
}
// Insert all the blocks in the first data node
LOG.info("Inserting " + uniqueBlocks.size() + " blocks");
cluster.injectBlocks(0, uniqueBlocks, null);
dfsClient = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()),
conf);
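      // A maxWaitSec of -1 disables the timeout: wait until every block
      // reaches the expected replication.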
waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 8,073 | 38.578431 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.hamcrest.CoreMatchers.equalTo;
import java.io.File;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.junit.AfterClass;
import org.junit.Assume;
import org.junit.Before;
import org.junit.BeforeClass;
/**
* This class tests short-circuit local reads without any FileInputStream or
* Socket caching. This is a regression test for HDFS-4417.
*/
public class TestParallelShortCircuitReadUnCached extends TestParallelReadUtil {
private static TemporarySocketDirectory sockDir;
@BeforeClass
static public void setupCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
sockDir = new TemporarySocketDirectory();
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
new File(sockDir.getDir(),
"TestParallelShortCircuitReadUnCached._PORT.sock").getAbsolutePath());
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
// Enabling data transfer encryption should have no effect when using
// short-circuit local reads. This is a regression test for HDFS-5353.
conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
false);
conf.setBoolean(DFSConfigKeys.
DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
// We want to test reading from stale sockets.
conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, 1);
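    // Keep client-side cached sockets around far longer than the datanode
    // keepalive above, so that cached sockets actually go stale before reuse.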
conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
5 * 60 * 1000);
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 32);
// Avoid using the FileInputStreamCache.
conf.setInt(HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_SIZE_KEY,
0);
DomainSocket.disableBindPathValidation();
DFSInputStream.tcpReadsDisabledForTesting = true;
setupCluster(1, conf);
}
@Before
public void before() {
Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
}
@AfterClass
static public void teardownCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
sockDir.close();
TestParallelReadUtil.teardownCluster();
}
}
| 3,309 | 40.375 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.junit.Test;
/**
* This class tests the creation of files with block-size
* smaller than the default buffer size of 4K.
*/
public class TestSmallBlock {
static final long seed = 0xDEADBEEFL;
static final int blockSize = 1;
static final int fileSize = 20;
boolean simulatedStorage = false;
private void writeFile(FileSystem fileSys, Path name) throws IOException {
    // create and write a file that spans many 1-byte blocks of data
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) 1, blockSize);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
stm.close();
}
private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
assertEquals(message+" byte "+(from+idx)+" differs. expected "+
expected[from+idx]+" actual "+actual[idx],
actual[idx], expected[from+idx]);
actual[idx] = 0;
}
}
private void checkFile(DistributedFileSystem fileSys, Path name)
throws IOException {
BlockLocation[] locations = fileSys.getFileBlockLocations(
fileSys.getFileStatus(name), 0, fileSize);
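    // blockSize is 1 byte, so the fileSize-byte file should map to exactly
    // fileSize block locations.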
assertEquals("Number of blocks", fileSize, locations.length);
FSDataInputStream stm = fileSys.open(name);
byte[] expected = new byte[fileSize];
if (simulatedStorage) {
LocatedBlocks lbs = fileSys.getClient().getLocatedBlocks(name.toString(),
0, fileSize);
DFSTestUtil.fillExpectedBuf(lbs, expected);
} else {
Random rand = new Random(seed);
rand.nextBytes(expected);
}
// do a sanity check. Read the file
byte[] actual = new byte[fileSize];
stm.readFully(0, actual);
checkAndEraseData(actual, 0, expected, "Read Sanity Test");
stm.close();
}
private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
assertTrue(fileSys.exists(name));
fileSys.delete(name, true);
assertTrue(!fileSys.exists(name));
}
/**
   * Tests small block size in DFS.
*/
@Test
public void testSmallBlock() throws IOException {
Configuration conf = new HdfsConfiguration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
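    // The block size must be a multiple of bytes-per-checksum, so use a
    // 1-byte checksum chunk to allow 1-byte blocks.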
conf.set(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, "1");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
DistributedFileSystem fileSys = cluster.getFileSystem();
try {
Path file1 = new Path("/smallblocktest.dat");
writeFile(fileSys, file1);
checkFile(fileSys, file1);
cleanupFile(fileSys, file1);
} finally {
fileSys.close();
cluster.shutdown();
}
}
@Test
public void testSmallBlockSimulatedStorage() throws IOException {
simulatedStorage = true;
testSmallBlock();
simulatedStorage = false;
}
}
| 4,432 | 34.464 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRemove.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.junit.Test;
public class TestDFSRemove {
final Path dir = new Path("/test/remove/");
void list(FileSystem fs, String name) throws IOException {
FileSystem.LOG.info("\n\n" + name);
for(FileStatus s : fs.listStatus(dir)) {
FileSystem.LOG.info("" + s.getPath());
}
}
static void createFile(FileSystem fs, Path f) throws IOException {
DataOutputStream a_out = fs.create(f);
a_out.writeBytes("something");
a_out.close();
}
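  // Sums the DFS space reported as used by every datanode in the cluster.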
static long getTotalDfsUsed(MiniDFSCluster cluster) throws IOException {
long total = 0;
for(DataNode node : cluster.getDataNodes()) {
total += DataNodeTestUtils.getFSDataset(node).getDfsUsed();
}
return total;
}
@Test
public void testRemove() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
FileSystem fs = cluster.getFileSystem();
assertTrue(fs.mkdirs(dir));
long dfsUsedStart = getTotalDfsUsed(cluster);
{
// Create 100 files
final int fileCount = 100;
for (int i = 0; i < fileCount; i++) {
Path a = new Path(dir, "a" + i);
createFile(fs, a);
}
long dfsUsedMax = getTotalDfsUsed(cluster);
// Remove 100 files
for (int i = 0; i < fileCount; i++) {
Path a = new Path(dir, "a" + i);
fs.delete(a, false);
}
// wait 3 heartbeat intervals, so that all blocks are deleted.
Thread.sleep(3 * DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000);
// all blocks should be gone now.
long dfsUsedFinal = getTotalDfsUsed(cluster);
assertEquals("All blocks should be gone. start=" + dfsUsedStart
+ " max=" + dfsUsedMax + " final=" + dfsUsedFinal, dfsUsedStart, dfsUsedFinal);
}
fs.delete(dir, true);
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
}
| 3,261 | 34.075269 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.TestListFiles;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.BeforeClass;
/**
* This class tests the FileStatus API.
*/
public class TestListFilesInDFS extends TestListFiles {
{
((Log4JLogger)FileSystem.LOG).getLogger().setLevel(Level.ALL);
}
private static MiniDFSCluster cluster;
@BeforeClass
public static void testSetUp() throws Exception {
setTestPaths(new Path("/tmp/TestListFilesInDFS"));
cluster = new MiniDFSCluster.Builder(conf).build();
fs = cluster.getFileSystem();
fs.delete(TEST_DIR, true);
}
@AfterClass
public static void testShutdown() throws Exception {
fs.close();
cluster.shutdown();
}
protected static Path getTestDir() {
return new Path("/main_");
}
}
| 1,765 | 29.982456 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;
/**
 * This class tests the presence of the seek bug as described
 * in HADOOP-508.
*/
public class TestSeekBug {
static final long seed = 0xDEADBEEFL;
static final int ONEMB = 1 << 20;
private void writeFile(FileSystem fileSys, Path name) throws IOException {
// create and write a file that contains 1MB
DataOutputStream stm = fileSys.create(name);
byte[] buffer = new byte[ONEMB];
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
stm.close();
}
private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
assertEquals(message+" byte "+(from+idx)+" differs. expected "+
expected[from+idx]+" actual "+actual[idx],
actual[idx], expected[from+idx]);
actual[idx] = 0;
}
}
private void seekReadFile(FileSystem fileSys, Path name) throws IOException {
FSDataInputStream stm = fileSys.open(name, 4096);
byte[] expected = new byte[ONEMB];
Random rand = new Random(seed);
rand.nextBytes(expected);
// First read 128 bytes to set count in BufferedInputStream
byte[] actual = new byte[128];
stm.read(actual, 0, actual.length);
// Now read a byte array that is bigger than the internal buffer
actual = new byte[100000];
IOUtils.readFully(stm, actual, 0, actual.length);
checkAndEraseData(actual, 128, expected, "First Read Test");
// now do a small seek, within the range that is already read
stm.seek(96036); // 4 byte seek
actual = new byte[128];
IOUtils.readFully(stm, actual, 0, actual.length);
checkAndEraseData(actual, 96036, expected, "Seek Bug");
// all done
stm.close();
}
/*
* Read some data, skip a few bytes and read more. HADOOP-922.
*/
private void smallReadSeek(FileSystem fileSys, Path name) throws IOException {
if (fileSys instanceof ChecksumFileSystem) {
fileSys = ((ChecksumFileSystem)fileSys).getRawFileSystem();
}
// Make the buffer size small to trigger code for HADOOP-922
FSDataInputStream stmRaw = fileSys.open(name, 1);
byte[] expected = new byte[ONEMB];
Random rand = new Random(seed);
rand.nextBytes(expected);
// Issue a simple read first.
byte[] actual = new byte[128];
stmRaw.seek(100000);
stmRaw.read(actual, 0, actual.length);
checkAndEraseData(actual, 100000, expected, "First Small Read Test");
// now do a small seek of 4 bytes, within the same block.
int newpos1 = 100000 + 128 + 4;
stmRaw.seek(newpos1);
stmRaw.read(actual, 0, actual.length);
checkAndEraseData(actual, newpos1, expected, "Small Seek Bug 1");
// seek another 256 bytes this time
int newpos2 = newpos1 + 256;
stmRaw.seek(newpos2);
stmRaw.read(actual, 0, actual.length);
checkAndEraseData(actual, newpos2, expected, "Small Seek Bug 2");
// all done
stmRaw.close();
}
private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
assertTrue(fileSys.exists(name));
fileSys.delete(name, true);
assertTrue(!fileSys.exists(name));
}
/**
* Test if the seek bug exists in FSDataInputStream in DFS.
*/
@Test
public void testSeekBugDFS() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fileSys = cluster.getFileSystem();
try {
Path file1 = new Path("seektest.dat");
writeFile(fileSys, file1);
seekReadFile(fileSys, file1);
smallReadSeek(fileSys, file1);
cleanupFile(fileSys, file1);
} finally {
fileSys.close();
cluster.shutdown();
}
}
/**
* Test (expected to throw IOE) for negative
   * <code>FSDataInputStream#seek</code> argument
*/
@Test (expected=IOException.class)
public void testNegativeSeek() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
try {
Path seekFile = new Path("seekboundaries.dat");
DFSTestUtil.createFile(
fs,
seekFile,
ONEMB,
fs.getDefaultReplication(seekFile),
seed);
FSDataInputStream stream = fs.open(seekFile);
// Perform "safe seek" (expected to pass)
stream.seek(65536);
assertEquals(65536, stream.getPos());
// expect IOE for this call
stream.seek(-73);
} finally {
fs.close();
cluster.shutdown();
}
}
/**
   * Test (expected to throw IOE) for <code>FSDataInputStream#seek</code>
* when the position argument is larger than the file size.
*/
@Test (expected=IOException.class)
public void testSeekPastFileSize() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
try {
Path seekFile = new Path("seekboundaries.dat");
DFSTestUtil.createFile(
fs,
seekFile,
ONEMB,
fs.getDefaultReplication(seekFile),
seed);
FSDataInputStream stream = fs.open(seekFile);
// Perform "safe seek" (expected to pass)
stream.seek(65536);
assertEquals(65536, stream.getPos());
// expect IOE for this call
stream.seek(ONEMB + ONEMB + ONEMB);
} finally {
fs.close();
cluster.shutdown();
}
}
/**
* Tests if the seek bug exists in FSDataInputStream in LocalFS.
*/
@Test
public void testSeekBugLocalFS() throws IOException {
Configuration conf = new HdfsConfiguration();
FileSystem fileSys = FileSystem.getLocal(conf);
try {
Path file1 = new Path("build/test/data", "seektest.dat");
writeFile(fileSys, file1);
seekReadFile(fileSys, file1);
cleanupFile(fileSys, file1);
} finally {
fileSys.close();
}
}
}
| 7,301 | 32.190909 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
|
/*
* UpgradeUtilities.java
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.util.Arrays;
import java.util.Collections;
import java.util.zip.CRC32;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import com.google.common.base.Preconditions;
import com.google.common.io.Files;
import com.google.common.primitives.Bytes;
/**
* This class defines a number of static helper methods used by the
* DFS Upgrade unit tests. By default, a singleton master populated storage
* directory is created for a Namenode (contains edits, fsimage,
* version, and time files) and a Datanode (contains version and
 * block files). The master directories are lazily created. They are then
 * copied by the createNameNodeStorageDirs() and createDataNodeStorageDirs()
 * methods to create new storage directories of the appropriate type
 * (Namenode or Datanode).
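 * <p>
 * A typical usage sketch (illustrative only; assumes a test that wants two
 * namenode storage directories):
 * <pre>
 *   UpgradeUtilities.initialize();
 *   Configuration conf = UpgradeUtilities.initializeStorageStateConf(2, null);
 *   String[] nameDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
 *   UpgradeUtilities.createNameNodeStorageDirs(nameDirs, "current");
 * </pre>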
*/
public class UpgradeUtilities {
// Root scratch directory on local filesystem
private static final File TEST_ROOT_DIR =
new File(MiniDFSCluster.getBaseDirectory());
// The singleton master storage directory for Namenode
private static final File namenodeStorage = new File(TEST_ROOT_DIR, "namenodeMaster");
// A checksum of the contents in namenodeStorage directory
private static long namenodeStorageChecksum;
// The namespaceId of the namenodeStorage directory
private static int namenodeStorageNamespaceID;
// The clusterId of the namenodeStorage directory
private static String namenodeStorageClusterID;
// The blockpoolId of the namenodeStorage directory
private static String namenodeStorageBlockPoolID;
// The fsscTime of the namenodeStorage directory
private static long namenodeStorageFsscTime;
// The singleton master storage directory for Datanode
private static final File datanodeStorage = new File(TEST_ROOT_DIR, "datanodeMaster");
// A checksum of the contents in datanodeStorage directory
private static long datanodeStorageChecksum;
// A checksum of the contents in blockpool storage directory
private static long blockPoolStorageChecksum;
// A checksum of the contents in blockpool finalize storage directory
private static long blockPoolFinalizedStorageChecksum;
// A checksum of the contents in blockpool rbw storage directory
private static long blockPoolRbwStorageChecksum;
/**
* Initialize the data structures used by this class.
* IMPORTANT NOTE: This method must be called once before calling
* any other public method on this class.
* <p>
* Creates a singleton master populated storage
* directory for a Namenode (contains edits, fsimage,
* version, and time files) and a Datanode (contains version and
* block files). This can be a lengthy operation.
*/
public static void initialize() throws Exception {
createEmptyDirs(new String[] {TEST_ROOT_DIR.toString()});
Configuration config = new HdfsConfiguration();
config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeStorage.toString());
config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeStorage.toString());
config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, datanodeStorage.toString());
MiniDFSCluster cluster = null;
String bpid = null;
try {
// format data-node
createEmptyDirs(new String[] {datanodeStorage.toString()});
// format and start NameNode and start DataNode
DFSTestUtil.formatNameNode(config);
cluster = new MiniDFSCluster.Builder(config)
.numDataNodes(1)
.startupOption(StartupOption.REGULAR)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.build();
NamenodeProtocols namenode = cluster.getNameNodeRpc();
namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
namenodeStorageFsscTime = namenode.versionRequest().getCTime();
namenodeStorageClusterID = namenode.versionRequest().getClusterID();
namenodeStorageBlockPoolID = namenode.versionRequest().getBlockPoolID();
FileSystem fs = FileSystem.get(config);
Path baseDir = new Path("/TestUpgrade");
fs.mkdirs(baseDir);
// write some files
int bufferSize = 4096;
byte[] buffer = new byte[bufferSize];
for(int i=0; i < bufferSize; i++)
buffer[i] = (byte)('0' + i % 50);
writeFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
writeFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);
// save image
namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
namenode.saveNamespace();
namenode.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
// write more files
writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
bpid = cluster.getNamesystem(0).getBlockPoolId();
} finally {
// shutdown
if (cluster != null) cluster.shutdown();
FileUtil.fullyDelete(new File(namenodeStorage,"in_use.lock"));
FileUtil.fullyDelete(new File(datanodeStorage,"in_use.lock"));
}
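    // Record baseline checksums of the freshly populated master directories;
    // upgrade tests later compare copied storage directories against these.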
namenodeStorageChecksum = checksumContents(NAME_NODE,
new File(namenodeStorage, "current"), false);
File dnCurDir = new File(datanodeStorage, "current");
datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir, false);
File bpCurDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
"current");
blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir, false);
File bpCurFinalizeDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
"current/"+DataStorage.STORAGE_DIR_FINALIZED);
blockPoolFinalizedStorageChecksum = checksumContents(DATA_NODE,
bpCurFinalizeDir, true);
File bpCurRbwDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
"current/"+DataStorage.STORAGE_DIR_RBW);
blockPoolRbwStorageChecksum = checksumContents(DATA_NODE, bpCurRbwDir,
false);
}
// Private helper method that writes a file to the given file system.
private static void writeFile(FileSystem fs, Path path, byte[] buffer,
int bufferSize) throws IOException
{
OutputStream out;
out = fs.create(path, true, bufferSize, (short) 1, 1024);
out.write(buffer, 0, bufferSize);
out.close();
}
/**
* Initialize {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and
* {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} with the specified
* number of directory entries. Also initialize dfs.blockreport.intervalMsec.
*/
public static Configuration initializeStorageStateConf(int numDirs,
Configuration conf) {
StringBuffer nameNodeDirs =
new StringBuffer(new File(TEST_ROOT_DIR, "name1").toString());
StringBuffer dataNodeDirs =
new StringBuffer(new File(TEST_ROOT_DIR, "data1").toString());
for (int i = 2; i <= numDirs; i++) {
nameNodeDirs.append("," + new File(TEST_ROOT_DIR, "name"+i));
dataNodeDirs.append("," + new File(TEST_ROOT_DIR, "data"+i));
}
if (conf == null) {
conf = new HdfsConfiguration();
}
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameNodeDirs.toString());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameNodeDirs.toString());
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDirs.toString());
conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000);
return conf;
}
/**
* Create empty directories. If a specified directory already exists
* then it is first removed.
*/
public static void createEmptyDirs(String[] dirs) throws IOException {
for (String d : dirs) {
File dir = new File(d);
if (dir.exists()) {
FileUtil.fullyDelete(dir);
}
dir.mkdirs();
}
}
/**
* Return the checksum for the singleton master storage directory
* for namenode
*/
public static long checksumMasterNameNodeContents() {
return namenodeStorageChecksum;
}
/**
* Return the checksum for the singleton master storage directory
* for datanode
*/
public static long checksumMasterDataNodeContents() {
return datanodeStorageChecksum;
}
/**
* Return the checksum for the singleton master storage directory
* for block pool.
*/
public static long checksumMasterBlockPoolContents() {
return blockPoolStorageChecksum;
}
/**
* Return the checksum for the singleton master storage directory
* for finalized dir under block pool.
*/
public static long checksumMasterBlockPoolFinalizedContents() {
return blockPoolFinalizedStorageChecksum;
}
/**
* Return the checksum for the singleton master storage directory
* for rbw dir under block pool.
*/
public static long checksumMasterBlockPoolRbwContents() {
return blockPoolRbwStorageChecksum;
}
/**
* Compute the checksum of all the files in the specified directory.
* This method provides an easy way to ensure equality between the contents
* of two directories.
*
* @param nodeType if DATA_NODE then any file named "VERSION" is ignored.
   *          This is because this file is changed every time
* the Datanode is started.
* @param dir must be a directory
* @param recursive whether or not to consider subdirectories
*
* @throws IllegalArgumentException if specified directory is not a directory
* @throws IOException if an IOException occurs while reading the files
* @return the computed checksum value
*/
public static long checksumContents(NodeType nodeType, File dir,
boolean recursive) throws IOException {
CRC32 checksum = new CRC32();
checksumContentsHelper(nodeType, dir, checksum, recursive);
return checksum.getValue();
}
public static void checksumContentsHelper(NodeType nodeType, File dir,
CRC32 checksum, boolean recursive) throws IOException {
if (!dir.isDirectory()) {
throw new IllegalArgumentException(
"Given argument is not a directory:" + dir);
}
File[] list = dir.listFiles();
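    // Sort entries so the checksum does not depend on the order in which the
    // OS lists the directory.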
Arrays.sort(list);
for (int i = 0; i < list.length; i++) {
if (!list[i].isFile()) {
if (recursive) {
checksumContentsHelper(nodeType, list[i], checksum, recursive);
}
continue;
}
// skip VERSION and dfsUsed and replicas file for DataNodes
if (nodeType == DATA_NODE &&
(list[i].getName().equals("VERSION") ||
list[i].getName().equals("dfsUsed") ||
list[i].getName().equals("replicas"))) {
continue;
}
FileInputStream fis = null;
try {
fis = new FileInputStream(list[i]);
byte[] buffer = new byte[1024];
int bytesRead;
while ((bytesRead = fis.read(buffer)) != -1) {
checksum.update(buffer, 0, bytesRead);
}
} finally {
if(fis != null) {
fis.close();
}
}
}
}
/**
* Simulate the {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} of a populated
* DFS filesystem.
   * For each parent directory, this method populates <code>parent/dirName</code>
   * with the content of the namenode storage directory that comes from a
   * singleton namenode master (which contains edits, fsimage, version and
   * time files). If the destination directory does not exist, it will be
   * created. If the directory already exists, it will first be deleted.
*
* @param parents parent directory where {@code dirName} is created
* @param dirName directory under which storage directory is created
* @return the array of created directories
*/
public static File[] createNameNodeStorageDirs(String[] parents,
String dirName) throws Exception {
File[] retVal = new File[parents.length];
for (int i = 0; i < parents.length; i++) {
File newDir = new File(parents[i], dirName);
createEmptyDirs(new String[] {newDir.toString()});
LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),
new Path(newDir.toString()),
false);
retVal[i] = newDir;
}
return retVal;
}
/**
* Simulate the {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} of a
* populated DFS filesystem.
   * For each parent directory, this method populates <code>parent/dirName</code>
   * with the content of the datanode storage directory that comes from a
   * singleton datanode master (which contains version and block files). If
   * the destination directory does not exist, it will be created. If the
   * directory already exists, it will first be deleted.
*
* @param parents parent directory where {@code dirName} is created
* @param dirName directory under which storage directory is created
* @return the array of created directories
*/
public static File[] createDataNodeStorageDirs(String[] parents,
String dirName) throws Exception {
File[] retVal = new File[parents.length];
for (int i = 0; i < parents.length; i++) {
File newDir = new File(parents[i], dirName);
createEmptyDirs(new String[] {newDir.toString()});
LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
new Path(newDir.toString()),
false);
retVal[i] = newDir;
}
return retVal;
}
/**
* Simulate the {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} of a
* populated DFS filesystem.
   * For each parent directory, this method populates <code>parent/dirName</code>
   * with the content of the block pool storage directory that comes from a
   * singleton datanode master (which contains version and block files). If
   * the destination directory does not exist, it will be created. If the
   * directory already exists, it will first be deleted.
*
* @param parents parent directory where {@code dirName} is created
* @param dirName directory under which storage directory is created
* @param bpid block pool id for which the storage directory is created.
* @return the array of created directories
*/
public static File[] createBlockPoolStorageDirs(String[] parents,
String dirName, String bpid) throws Exception {
File[] retVal = new File[parents.length];
Path bpCurDir = new Path(MiniDFSCluster.getBPDir(datanodeStorage,
bpid, Storage.STORAGE_DIR_CURRENT));
for (int i = 0; i < parents.length; i++) {
File newDir = new File(parents[i] + "/current/" + bpid, dirName);
createEmptyDirs(new String[] {newDir.toString()});
LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
localFS.copyToLocalFile(bpCurDir,
new Path(newDir.toString()),
false);
retVal[i] = newDir;
}
return retVal;
}
/**
* Create a <code>version</code> file for namenode inside the specified parent
* directory. If such a file already exists, it will be overwritten.
* The given version string will be written to the file as the layout
* version. None of the parameters may be null.
*
* @param parent directory where namenode VERSION file is stored
* @param version StorageInfo to create VERSION file from
* @param bpid Block pool Id
*
* @return the created version file
*/
public static File[] createNameNodeVersionFile(Configuration conf,
File[] parent, StorageInfo version, String bpid) throws IOException {
Storage storage = new NNStorage(conf,
Collections.<URI>emptyList(),
Collections.<URI>emptyList());
storage.setStorageInfo(version);
File[] versionFiles = new File[parent.length];
for (int i = 0; i < parent.length; i++) {
versionFiles[i] = new File(parent[i], "VERSION");
StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
storage.writeProperties(versionFiles[i], sd);
}
return versionFiles;
}
/**
* Create a <code>version</code> file for datanode inside the specified parent
* directory. If such a file already exists, it will be overwritten.
* The given version string will be written to the file as the layout
* version. None of the parameters may be null.
*
   * @param parent directory where datanode VERSION file is stored
* @param version StorageInfo to create VERSION file from
* @param bpid Block pool Id
*/
public static void createDataNodeVersionFile(File[] parent,
StorageInfo version, String bpid) throws IOException {
createDataNodeVersionFile(parent, version, bpid, bpid);
}
/**
* Create a <code>version</code> file for datanode inside the specified parent
* directory. If such a file already exists, it will be overwritten.
* The given version string will be written to the file as the layout
* version. None of the parameters may be null.
*
   * @param parent directory where datanode VERSION file is stored
* @param version StorageInfo to create VERSION file from
* @param bpid Block pool Id
* @param bpidToWrite Block pool Id to write into the version file
*/
public static void createDataNodeVersionFile(File[] parent,
StorageInfo version, String bpid, String bpidToWrite) throws IOException {
DataStorage storage = new DataStorage(version);
storage.setDatanodeUuid("FixedDatanodeUuid");
File[] versionFiles = new File[parent.length];
for (int i = 0; i < parent.length; i++) {
File versionFile = new File(parent[i], "VERSION");
StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
storage.createStorageID(sd, false);
storage.writeProperties(versionFile, sd);
versionFiles[i] = versionFile;
File bpDir = BlockPoolSliceStorage.getBpRoot(bpid, parent[i]);
createBlockPoolVersionFile(bpDir, version, bpidToWrite);
}
}
public static void createBlockPoolVersionFile(File bpDir,
StorageInfo version, String bpid) throws IOException {
// Create block pool version files
if (DataNodeLayoutVersion.supports(
LayoutVersion.Feature.FEDERATION, version.layoutVersion)) {
File bpCurDir = new File(bpDir, Storage.STORAGE_DIR_CURRENT);
BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(version,
bpid);
File versionFile = new File(bpCurDir, "VERSION");
StorageDirectory sd = new StorageDirectory(bpDir);
bpStorage.writeProperties(versionFile, sd);
}
}
/**
   * Corrupt the specified file by replacing the first occurrence of
   * <code>stringToCorrupt</code> with the given <code>replacement</code> bytes.
*
* @throws IllegalArgumentException if the given file is not a file
* @throws IOException if an IOException occurs while reading or writing the file
*/
public static void corruptFile(File file,
byte[] stringToCorrupt,
byte[] replacement) throws IOException {
Preconditions.checkArgument(replacement.length == stringToCorrupt.length);
if (!file.isFile()) {
throw new IllegalArgumentException(
"Given argument is not a file:" + file);
}
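    // Load the whole file, overwrite the first occurrence of stringToCorrupt
    // with the replacement bytes, and write the result back.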
byte[] data = Files.toByteArray(file);
int index = Bytes.indexOf(data, stringToCorrupt);
if (index == -1) {
throw new IOException(
"File " + file + " does not contain string " +
new String(stringToCorrupt));
}
for (int i = 0; i < stringToCorrupt.length; i++) {
data[index + i] = replacement[i];
}
Files.write(data, file);
}
/**
* Return the layout version inherent in the current version
* of the Namenode, whether it is running or not.
*/
public static int getCurrentNameNodeLayoutVersion() {
return HdfsServerConstants.NAMENODE_LAYOUT_VERSION;
}
/**
* Return the namespace ID inherent in the currently running
* Namenode. If no Namenode is running, return the namespace ID of
* the master Namenode storage directory.
*
* The UpgradeUtilities.initialize() method must be called once before
* calling this method.
*/
public static int getCurrentNamespaceID(MiniDFSCluster cluster) throws IOException {
if (cluster != null) {
return cluster.getNameNodeRpc().versionRequest().getNamespaceID();
}
return namenodeStorageNamespaceID;
}
/**
* Return the cluster ID inherent in the currently running
* Namenode.
*/
public static String getCurrentClusterID(MiniDFSCluster cluster) throws IOException {
if (cluster != null) {
return cluster.getNameNodeRpc().versionRequest().getClusterID();
}
return namenodeStorageClusterID;
}
/**
* Return the blockpool ID inherent in the currently running
* Namenode.
*/
public static String getCurrentBlockPoolID(MiniDFSCluster cluster) throws IOException {
if (cluster != null) {
return cluster.getNameNodeRpc().versionRequest().getBlockPoolID();
}
return namenodeStorageBlockPoolID;
}
/**
* Return the File System State Creation Timestamp (FSSCTime) inherent
* in the currently running Namenode. If no Namenode is running,
* return the FSSCTime of the master Namenode storage directory.
*
* The UpgradeUtilities.initialize() method must be called once before
* calling this method.
*/
public static long getCurrentFsscTime(MiniDFSCluster cluster) throws IOException {
if (cluster != null) {
return cluster.getNameNodeRpc().versionRequest().getCTime();
}
return namenodeStorageFsscTime;
}
/**
* Create empty block pool directories
* @return array of block pool directories
*/
public static String[] createEmptyBPDirs(String[] baseDirs, String bpid)
throws IOException {
String[] bpDirs = new String[baseDirs.length];
for (int i = 0; i < baseDirs.length; i++) {
bpDirs[i] = MiniDFSCluster.getBPDir(new File(baseDirs[i]), bpid);
}
createEmptyDirs(bpDirs);
return bpDirs;
}
}
| 24,482 | 39.40099 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelUnixDomainRead.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.hamcrest.CoreMatchers.equalTo;
import java.io.File;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.junit.AfterClass;
import org.junit.Assume;
import org.junit.Before;
import org.junit.BeforeClass;
public class TestParallelUnixDomainRead extends TestParallelReadUtil {
private static TemporarySocketDirectory sockDir;
@BeforeClass
static public void setupCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
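    // Make any fallback to TCP reads fail, so the test exercises only the
    // domain-socket read path.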
DFSInputStream.tcpReadsDisabledForTesting = true;
sockDir = new TemporarySocketDirectory();
HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
new File(sockDir.getDir(), "TestParallelLocalRead.%d.sock").getAbsolutePath());
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
DomainSocket.disableBindPathValidation();
setupCluster(1, conf);
}
@Before
public void before() {
Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
}
@AfterClass
static public void teardownCluster() throws Exception {
if (DomainSocket.getLoadingFailureReason() != null) return;
sockDir.close();
TestParallelReadUtil.teardownCluster();
}
}
| 2,296 | 36.655738 | 85 |
java
|