repo (string, lengths 1 to 191, ⌀) | file (string, lengths 23 to 351) | code (string, lengths 0 to 5.32M) | file_length (int64, 0 to 5.32M) | avg_line_length (float64, 0 to 2.9k) | max_line_length (int64, 0 to 288k) | extension_type (string, 1 class)
---|---|---|---|---|---|---
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import java.io.IOException;
import java.util.EnumSet;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.RandomDatum;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.SequenceFile.Writer;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.junit.Test;
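/**
 * Tests that hsync() and the SYNC_BLOCK create flag cause the DataNode to
 * fsync block data, as observed through the DataNode FsyncCount metric.
 */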
public class TestHSync {
private void checkSyncMetric(MiniDFSCluster cluster, int dn, long value) {
DataNode datanode = cluster.getDataNodes().get(dn);
assertCounter("FsyncCount", value, getMetrics(datanode.getMetrics().name()));
}
private void checkSyncMetric(MiniDFSCluster cluster, long value) {
checkSyncMetric(cluster, 0, value);
}
/** Test basic hsync cases */
@Test
public void testHSync() throws Exception {
testHSyncOperation(false);
}
@Test
public void testHSyncWithAppend() throws Exception {
testHSyncOperation(true);
}
private void testHSyncOperation(boolean testWithAppend) throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
final DistributedFileSystem fs = cluster.getFileSystem();
final Path p = new Path("/testHSync/foo");
final int len = 1 << 16;
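// Create the file with a 64 KB block size (len) and SYNC_BLOCK enabled;
// the single-byte writes below never fill a block on their own.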
FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
4096, (short) 1, len, null);
if (testWithAppend) {
// re-open the file with append call
out.close();
out = fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.SYNC_BLOCK),
4096, null);
}
out.hflush();
// hflush does not sync
checkSyncMetric(cluster, 0);
out.hsync();
// hsync on empty file does nothing
checkSyncMetric(cluster, 0);
out.write(1);
checkSyncMetric(cluster, 0);
out.hsync();
checkSyncMetric(cluster, 1);
// avoiding repeated hsyncs is a potential future optimization
out.hsync();
checkSyncMetric(cluster, 2);
out.hflush();
// hflush still does not sync
checkSyncMetric(cluster, 2);
out.close();
// close syncs because the file was created with SYNC_BLOCK
checkSyncMetric(cluster, 3);
// same test with a file created without SYNC_BLOCK
out = fs.create(p, FsPermission.getDefault(),
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
4096, (short) 1, len, null);
out.hsync();
checkSyncMetric(cluster, 3);
out.write(1);
checkSyncMetric(cluster, 3);
out.hsync();
checkSyncMetric(cluster, 4);
// repeated hsyncs
out.hsync();
checkSyncMetric(cluster, 5);
out.close();
// close does not sync (not opened with SYNC_BLOCK)
checkSyncMetric(cluster, 5);
cluster.shutdown();
}
/** Test hsync on an exact block boundary */
@Test
public void testHSyncBlockBoundary() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
final FileSystem fs = cluster.getFileSystem();
final Path p = new Path("/testHSyncBlockBoundary/foo");
final int len = 1 << 16;
final byte[] fileContents = AppendTestUtil.initBuffer(len);
FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
4096, (short) 1, len, null);
// fill exactly one block (tests the SYNC_BLOCK case) and flush
out.write(fileContents, 0, len);
out.hflush();
// the full block should have caused a sync
checkSyncMetric(cluster, 1);
out.hsync();
// nothing has been written to the next block yet, so hsync does not sync again
checkSyncMetric(cluster, 1);
// write one more byte and sync again
out.write(1);
out.hsync();
checkSyncMetric(cluster, 2);
out.close();
checkSyncMetric(cluster, 3);
cluster.shutdown();
}
/** Test hsync via SequenceFiles */
@Test
public void testSequenceFileSync() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
final FileSystem fs = cluster.getFileSystem();
final Path p = new Path("/testSequenceFileSync/foo");
final int len = 1 << 16;
FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
4096, (short) 1, len, null);
Writer w = SequenceFile.createWriter(new Configuration(),
Writer.stream(out),
Writer.keyClass(RandomDatum.class),
Writer.valueClass(RandomDatum.class),
Writer.compression(CompressionType.NONE, new DefaultCodec()));
w.hflush();
checkSyncMetric(cluster, 0);
w.hsync();
checkSyncMetric(cluster, 1);
int seed = new Random().nextInt();
RandomDatum.Generator generator = new RandomDatum.Generator(seed);
generator.next();
w.append(generator.getKey(), generator.getValue());
w.hsync();
checkSyncMetric(cluster, 2);
w.close();
checkSyncMetric(cluster, 2);
out.close();
checkSyncMetric(cluster, 3);
cluster.shutdown();
}
/** Test that syncBlock is correctly performed at replicas */
@Test
public void testHSyncWithReplication() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
final FileSystem fs = cluster.getFileSystem();
final Path p = new Path("/testHSyncWithReplication/foo");
final int len = 1 << 16;
FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
4096, (short) 3, len, null);
out.write(1);
out.hflush();
checkSyncMetric(cluster, 0, 0);
checkSyncMetric(cluster, 1, 0);
checkSyncMetric(cluster, 2, 0);
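// hsync should trigger an fsync on every replica, i.e. on all three DataNodes.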
out.hsync();
checkSyncMetric(cluster, 0, 1);
checkSyncMetric(cluster, 1, 1);
checkSyncMetric(cluster, 2, 1);
out.hsync();
checkSyncMetric(cluster, 0, 2);
checkSyncMetric(cluster, 1, 2);
checkSyncMetric(cluster, 2, 2);
cluster.shutdown();
}
}
| 7,595 | 35 | 86 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
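/**
 * Tests for AvailableSpaceVolumeChoosingPolicy: round-robin behavior while
 * volumes are within the balanced-space threshold, and preference for
 * volumes with more free space once they are not.
 */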
public class TestAvailableSpaceVolumeChoosingPolicy {
private static final int RANDOMIZED_ITERATIONS = 10000;
private static final float RANDOMIZED_ERROR_PERCENT = 0.05f;
private static final long RANDOMIZED_ALLOWED_ERROR = (long) (RANDOMIZED_ERROR_PERCENT * RANDOMIZED_ITERATIONS);
private static void initPolicy(VolumeChoosingPolicy<FsVolumeSpi> policy,
float preferencePercent) {
Configuration conf = new Configuration();
// Set the threshold to consider volumes imbalanced to 1MB
conf.setLong(
DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_KEY,
1024 * 1024); // 1MB
conf.setFloat(
DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY,
preferencePercent);
((Configurable) policy).setConf(conf);
}
// Test the Round-Robin block-volume fallback path when all volumes are within
// the threshold.
@Test(timeout=60000)
public void testRR() throws Exception {
@SuppressWarnings("unchecked")
final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy =
ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class, null);
initPolicy(policy, 1.0f);
TestRoundRobinVolumeChoosingPolicy.testRR(policy);
}
// ChooseVolume should throw DiskOutOfSpaceException
// with volume and block sizes in exception message.
@Test(timeout=60000)
public void testRRPolicyExceptionMessage() throws Exception {
final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy
= new AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi>();
initPolicy(policy, 1.0f);
TestRoundRobinVolumeChoosingPolicy.testRRPolicyExceptionMessage(policy);
}
@Test(timeout=60000)
public void testTwoUnbalancedVolumes() throws Exception {
@SuppressWarnings("unchecked")
final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy =
ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class, null);
initPolicy(policy, 1.0f);
List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
// First volume with 1MB free space
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(0).getAvailable()).thenReturn(1024L * 1024L);
// Second volume with 3MB free space, which is a difference of 2MB, more
// than the threshold of 1MB.
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L * 3);
Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
}
@Test(timeout=60000)
public void testThreeUnbalancedVolumes() throws Exception {
@SuppressWarnings("unchecked")
final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy =
ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class, null);
List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
// First volume with 1MB free space
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(0).getAvailable()).thenReturn(1024L * 1024L);
// Second volume with 3MB free space, which is a difference of 2MB, more
// than the threshold of 1MB.
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L * 3);
// Third volume, again with 3MB free space.
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(2).getAvailable()).thenReturn(1024L * 1024L * 3);
// We should alternate assigning between the two volumes with a lot of free
// space.
initPolicy(policy, 1.0f);
Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));
Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));
// All writes should be assigned to the volume with the least free space.
initPolicy(policy, 0.0f);
Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
}
@Test(timeout=60000)
public void testFourUnbalancedVolumes() throws Exception {
@SuppressWarnings("unchecked")
final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy =
ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class, null);
List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
// First volume with 1MB free space
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(0).getAvailable()).thenReturn(1024L * 1024L);
// Second volume with 1MB + 1 byte free space
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L + 1);
// Third volume with 3MB free space, which is a difference of 2MB, more
// than the threshold of 1MB.
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(2).getAvailable()).thenReturn(1024L * 1024L * 3);
// Fourth volume, again with 3MB free space.
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(3).getAvailable()).thenReturn(1024L * 1024L * 3);
// We should alternate assigning between the two volumes with a lot of free
// space.
initPolicy(policy, 1.0f);
Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));
Assert.assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100));
Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));
Assert.assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100));
// We should alternate assigning between the two volumes with less free
// space.
initPolicy(policy, 0.0f);
Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
}
@Test(timeout=60000)
public void testNotEnoughSpaceOnSelectedVolume() throws Exception {
@SuppressWarnings("unchecked")
final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy =
ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class, null);
List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
// First volume with 1MB free space
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(0).getAvailable()).thenReturn(1024L * 1024L);
// Second volume with 3MB free space, which is a difference of 2MB, more
// than the threshold of 1MB.
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L * 3);
// All writes should be assigned to the volume with the least free space.
// However, if the volume with the least free space doesn't have enough
// space to accept the replica size, and another volume does have enough
// free space, that should be chosen instead.
initPolicy(policy, 0.0f);
Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 1024L * 1024L * 2));
}
@Test(timeout=60000)
public void testAvailableSpaceChanges() throws Exception {
@SuppressWarnings("unchecked")
final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy =
ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class, null);
initPolicy(policy, 1.0f);
List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
// First volume with 1MB free space
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(0).getAvailable()).thenReturn(1024L * 1024L);
// Second volume with 3MB free space, which is a difference of 2MB, more
// than the threshold of 1MB.
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(1).getAvailable())
.thenReturn(1024L * 1024L * 3)
.thenReturn(1024L * 1024L * 3)
.thenReturn(1024L * 1024L * 3)
.thenReturn(1024L * 1024L * 1); // After the third check, return 1MB.
// Should still be able to get a volume for the replica even though the
// available space on the second volume changed.
Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
}
@Test(timeout=60000)
public void randomizedTest1() throws Exception {
doRandomizedTest(0.75f, 1, 1);
}
@Test(timeout=60000)
public void randomizedTest2() throws Exception {
doRandomizedTest(0.75f, 5, 1);
}
@Test(timeout=60000)
public void randomizedTest3() throws Exception {
doRandomizedTest(0.75f, 1, 5);
}
@Test(timeout=60000)
public void randomizedTest4() throws Exception {
doRandomizedTest(0.90f, 5, 1);
}
/*
* Ensure that we randomly select the lesser-used volumes with appropriate
* frequency.
*/
public void doRandomizedTest(float preferencePercent, int lowSpaceVolumes,
int highSpaceVolumes) throws Exception {
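// A fixed seed keeps the randomized volume selection reproducible across runs.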
Random random = new Random(123L);
final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy =
new AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi>(random);
List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
// Volumes with 1MB free space
for (int i = 0; i < lowSpaceVolumes; i++) {
FsVolumeSpi volume = Mockito.mock(FsVolumeSpi.class);
Mockito.when(volume.getAvailable()).thenReturn(1024L * 1024L);
volumes.add(volume);
}
// Volumes with 3MB free space
for (int i = 0; i < highSpaceVolumes; i++) {
FsVolumeSpi volume = Mockito.mock(FsVolumeSpi.class);
Mockito.when(volume.getAvailable()).thenReturn(1024L * 1024L * 3);
volumes.add(volume);
}
initPolicy(policy, preferencePercent);
long lowAvailableSpaceVolumeSelected = 0;
long highAvailableSpaceVolumeSelected = 0;
for (int i = 0; i < RANDOMIZED_ITERATIONS; i++) {
FsVolumeSpi volume = policy.chooseVolume(volumes, 100);
for (int j = 0; j < volumes.size(); j++) {
// Note how many times the first low available volume was selected
if (volume == volumes.get(j) && j == 0) {
lowAvailableSpaceVolumeSelected++;
}
// Note how many times the first high available volume was selected
if (volume == volumes.get(j) && j == lowSpaceVolumes) {
highAvailableSpaceVolumeSelected++;
break;
}
}
}
// Calculate the expected ratio of how often low available space volumes
// were selected vs. high available space volumes.
float expectedSelectionRatio = preferencePercent / (1 - preferencePercent);
GenericTestUtils.assertValueNear(
(long)(lowAvailableSpaceVolumeSelected * expectedSelectionRatio),
highAvailableSpaceVolumeSelected,
RANDOMIZED_ALLOWED_ERROR);
}
}
| 12,920 | 41.503289 | 142 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestRoundRobinVolumeChoosingPolicy.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
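/**
 * Tests for RoundRobinVolumeChoosingPolicy. The static helpers are reused by
 * TestAvailableSpaceVolumeChoosingPolicy for its round-robin cases.
 */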
public class TestRoundRobinVolumeChoosingPolicy {
// Test the Round-Robin block-volume choosing algorithm.
@Test
public void testRR() throws Exception {
@SuppressWarnings("unchecked")
final RoundRobinVolumeChoosingPolicy<FsVolumeSpi> policy =
ReflectionUtils.newInstance(RoundRobinVolumeChoosingPolicy.class, null);
testRR(policy);
}
public static void testRR(VolumeChoosingPolicy<FsVolumeSpi> policy)
throws Exception {
final List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
// First volume, with 100 bytes of space.
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(0).getAvailable()).thenReturn(100L);
// Second volume, with 200 bytes of space.
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);
// Test two rounds of round-robin choosing
Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0));
Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0));
Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 0));
Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 0));
// The first volume has only 100L space, so the policy should
// wisely choose the second one in case we ask for more.
Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 150));
// An IOException is expected when no volume has enough space.
try {
policy.chooseVolume(volumes, Long.MAX_VALUE);
Assert.fail();
} catch (IOException e) {
// Passed.
}
}
// ChooseVolume should throw DiskOutOfSpaceException
// with volume and block sizes in exception message.
@Test
public void testRRPolicyExceptionMessage() throws Exception {
final RoundRobinVolumeChoosingPolicy<FsVolumeSpi> policy
= new RoundRobinVolumeChoosingPolicy<FsVolumeSpi>();
testRRPolicyExceptionMessage(policy);
}
public static void testRRPolicyExceptionMessage(
VolumeChoosingPolicy<FsVolumeSpi> policy) throws Exception {
final List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
// First volume, with 500 bytes of space.
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(0).getAvailable()).thenReturn(500L);
// Second volume, with 600 bytes of space.
volumes.add(Mockito.mock(FsVolumeSpi.class));
Mockito.when(volumes.get(1).getAvailable()).thenReturn(600L);
int blockSize = 700;
try {
policy.chooseVolume(volumes, blockSize);
Assert.fail("expected to throw DiskOutOfSpaceException");
} catch(DiskOutOfSpaceException e) {
Assert.assertEquals("Not returnig the expected message",
"Out of space: The volume with the most available space (=" + 600
+ " B) is less than the block size (=" + blockSize + " B).",
e.getMessage());
}
}
}
| 4,038 | 37.103774 | 80 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaPlacement.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
import java.io.IOException;
import static org.apache.hadoop.fs.StorageType.DEFAULT;
import static org.apache.hadoop.fs.StorageType.RAM_DISK;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.fail;
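/**
 * Tests replica placement for files written with the LAZY_PERSIST flag:
 * placement on RAM_DISK when it is available and fallback to disk otherwise.
 */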
public class TestLazyPersistReplicaPlacement extends LazyPersistTestCase {
@Test
public void testPlacementOnRamDisk() throws IOException {
getClusterBuilder().build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path, RAM_DISK);
}
@Test
public void testPlacementOnSizeLimitedRamDisk() throws IOException {
getClusterBuilder().setRamDiskReplicaCapacity(3).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
makeTestFile(path1, BLOCK_SIZE, true);
makeTestFile(path2, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
ensureFileReplicasOnStorageType(path2, RAM_DISK);
}
/**
* Client tries to write LAZY_PERSIST to a DN with no RamDisk configured.
* The write should default to disk with no error.
* @throws IOException
*/
@Test
public void testFallbackToDisk() throws IOException {
getClusterBuilder().setHasTransientStorage(false).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path, DEFAULT);
}
@Test
public void testSynchronousEviction() throws Exception {
getClusterBuilder().setMaxLockedMemory(BLOCK_SIZE).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
final Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
makeTestFile(path1, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
// Wait until the replica is written to persistent storage.
waitForMetric("RamDiskBlocksLazyPersisted", 1);
// Ensure that writing a new file to RAM DISK evicts the block
// for the previous one.
Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
makeTestFile(path2, BLOCK_SIZE, true);
verifyRamDiskJMXMetric("RamDiskBlocksEvictedWithoutRead", 1);
}
/**
* The file cannot fit in RamDisk even with eviction.
* @throws IOException
*/
@Test
public void testFallbackToDiskFull() throws Exception {
getClusterBuilder().setMaxLockedMemory(BLOCK_SIZE / 2).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path, DEFAULT);
verifyRamDiskJMXMetric("RamDiskBlocksWriteFallback", 1);
}
/**
* The file partially fits in RamDisk after eviction.
* RamDisk can fit 2 blocks. Write a file with 5 blocks.
* Expect 2 blocks are on RamDisk and rest on disk.
* @throws IOException
*/
@Test
public void testFallbackToDiskPartial()
throws IOException, InterruptedException {
getClusterBuilder().setMaxLockedMemory(2 * BLOCK_SIZE).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, BLOCK_SIZE * 5, true);
// Sleep for a short time to allow the lazy writer thread to do its job
Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);
triggerBlockReport();
int numBlocksOnRamDisk = 0;
int numBlocksOnDisk = 0;
long fileLength = client.getFileInfo(path.toString()).getLen();
LocatedBlocks locatedBlocks =
client.getLocatedBlocks(path.toString(), 0, fileLength);
for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
if (locatedBlock.getStorageTypes()[0] == RAM_DISK) {
numBlocksOnRamDisk++;
} else if (locatedBlock.getStorageTypes()[0] == DEFAULT) {
numBlocksOnDisk++;
}
}
// Since eviction is asynchronous, depending on its timing relative to the
// writes, we may get 2 or fewer blocks on RAM disk.
assertThat(numBlocksOnRamDisk, is(2));
assertThat(numBlocksOnDisk, is(3));
}
/**
* If the only available storage is RAM_DISK and the LAZY_PERSIST flag is not
* specified, then block placement should fail.
*
* @throws IOException
*/
@Test
public void testRamDiskNotChosenByDefault() throws IOException {
getClusterBuilder().setStorageTypes(new StorageType[] {RAM_DISK, RAM_DISK})
.build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
try {
makeTestFile(path, BLOCK_SIZE, false);
fail("Block placement to RAM_DISK should have failed without lazyPersist flag");
} catch (Throwable t) {
LOG.info("Got expected exception ", t);
}
}
}
| 6,198 | 35.251462 | 86 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
import org.apache.hadoop.hdfs.server.datanode.DNConf;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaHandler;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.MultipleIOException;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.StringUtils;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Matchers;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyListOf;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
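/**
 * Unit tests for FsDatasetImpl, mostly run against mocked DataNode and
 * DataStorage instances: adding and removing volumes, deleting blocks, and
 * resolving duplicate replicas.
 */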
public class TestFsDatasetImpl {
private static final String BASE_DIR =
new FileSystemTestHelper().getTestRootDir();
private static final int NUM_INIT_VOLUMES = 2;
private static final String CLUSTER_ID = "cluser-id";
private static final String[] BLOCK_POOL_IDS = {"bpid-0", "bpid-1"};
// Used to generate storageUuid
private static final DataStorage dsForStorageUuid = new DataStorage(
new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE));
private Configuration conf;
private DataNode datanode;
private DataStorage storage;
private FsDatasetImpl dataset;
private final static String BLOCKPOOL = "BP-TEST";
private static Storage.StorageDirectory createStorageDirectory(File root) {
Storage.StorageDirectory sd = new Storage.StorageDirectory(root);
dsForStorageUuid.createStorageID(sd, false);
return sd;
}
private static void createStorageDirs(DataStorage storage, Configuration conf,
int numDirs) throws IOException {
List<Storage.StorageDirectory> dirs =
new ArrayList<Storage.StorageDirectory>();
List<String> dirStrings = new ArrayList<String>();
for (int i = 0; i < numDirs; i++) {
File loc = new File(BASE_DIR + "/data" + i);
dirStrings.add(new Path(loc.toString()).toUri().toString());
loc.mkdirs();
dirs.add(createStorageDirectory(loc));
when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
}
String dataDir = StringUtils.join(",", dirStrings);
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir);
when(storage.dirIterator()).thenReturn(dirs.iterator());
when(storage.getNumStorageDirs()).thenReturn(numDirs);
}
private int getNumVolumes() {
try (FsDatasetSpi.FsVolumeReferences volumes =
dataset.getFsVolumeReferences()) {
return volumes.size();
} catch (IOException e) {
return 0;
}
}
@Before
public void setUp() throws IOException {
datanode = mock(DataNode.class);
storage = mock(DataStorage.class);
this.conf = new Configuration();
this.conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
final DNConf dnConf = new DNConf(conf);
when(datanode.getConf()).thenReturn(conf);
when(datanode.getDnConf()).thenReturn(dnConf);
final BlockScanner disabledBlockScanner = new BlockScanner(datanode, conf);
when(datanode.getBlockScanner()).thenReturn(disabledBlockScanner);
createStorageDirs(storage, conf, NUM_INIT_VOLUMES);
dataset = new FsDatasetImpl(datanode, storage, conf);
for (String bpid : BLOCK_POOL_IDS) {
dataset.addBlockPool(bpid, conf);
}
assertEquals(NUM_INIT_VOLUMES, getNumVolumes());
assertEquals(0, dataset.getNumFailedVolumes());
}
@Test
public void testAddVolumes() throws IOException {
final int numNewVolumes = 3;
final int numExistingVolumes = getNumVolumes();
final int totalVolumes = numNewVolumes + numExistingVolumes;
Set<String> expectedVolumes = new HashSet<String>();
List<NamespaceInfo> nsInfos = Lists.newArrayList();
for (String bpid : BLOCK_POOL_IDS) {
nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
}
for (int i = 0; i < numNewVolumes; i++) {
String path = BASE_DIR + "/newData" + i;
String pathUri = new Path(path).toUri().toString();
expectedVolumes.add(new File(pathUri).toString());
StorageLocation loc = StorageLocation.parse(pathUri);
Storage.StorageDirectory sd = createStorageDirectory(new File(path));
DataStorage.VolumeBuilder builder =
new DataStorage.VolumeBuilder(storage, sd);
when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
anyListOf(NamespaceInfo.class)))
.thenReturn(builder);
dataset.addVolume(loc, nsInfos);
}
assertEquals(totalVolumes, getNumVolumes());
assertEquals(totalVolumes, dataset.storageMap.size());
Set<String> actualVolumes = new HashSet<String>();
try (FsDatasetSpi.FsVolumeReferences volumes =
dataset.getFsVolumeReferences()) {
for (int i = 0; i < numNewVolumes; i++) {
actualVolumes.add(volumes.get(numExistingVolumes + i).getBasePath());
}
}
assertEquals(actualVolumes.size(), expectedVolumes.size());
assertTrue(actualVolumes.containsAll(expectedVolumes));
}
@Test(timeout = 30000)
public void testRemoveVolumes() throws IOException {
// Feed FsDataset with block metadata.
final int NUM_BLOCKS = 100;
for (int i = 0; i < NUM_BLOCKS; i++) {
String bpid = BLOCK_POOL_IDS[NUM_BLOCKS % BLOCK_POOL_IDS.length];
ExtendedBlock eb = new ExtendedBlock(bpid, i);
try (ReplicaHandler replica =
dataset.createRbw(StorageType.DEFAULT, eb, false)) {
}
}
final String[] dataDirs =
conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
final String volumePathToRemove = dataDirs[0];
Set<File> volumesToRemove = new HashSet<>();
volumesToRemove.add(StorageLocation.parse(volumePathToRemove).getFile());
dataset.removeVolumes(volumesToRemove, true);
int expectedNumVolumes = dataDirs.length - 1;
assertEquals("The volume has been removed from the volumeList.",
expectedNumVolumes, getNumVolumes());
assertEquals("The volume has been removed from the storageMap.",
expectedNumVolumes, dataset.storageMap.size());
try {
dataset.asyncDiskService.execute(volumesToRemove.iterator().next(),
new Runnable() {
@Override
public void run() {}
});
fail("Expect RuntimeException: the volume has been removed from the "
+ "AsyncDiskService.");
} catch (RuntimeException e) {
GenericTestUtils.assertExceptionContains("Cannot find root", e);
}
int totalNumReplicas = 0;
for (String bpid : dataset.volumeMap.getBlockPoolList()) {
totalNumReplicas += dataset.volumeMap.size(bpid);
}
assertEquals("The replica infos on this volume has been removed from the "
+ "volumeMap.", NUM_BLOCKS / NUM_INIT_VOLUMES,
totalNumReplicas);
}
@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
final int numExistingVolumes = getNumVolumes();
List<NamespaceInfo> nsInfos = new ArrayList<>();
for (String bpid : BLOCK_POOL_IDS) {
nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
}
String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
StorageLocation loc = StorageLocation.parse(newVolumePath);
Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
DataStorage.VolumeBuilder builder =
new DataStorage.VolumeBuilder(storage, sd);
when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
anyListOf(NamespaceInfo.class)))
.thenReturn(builder);
dataset.addVolume(loc, nsInfos);
assertEquals(numExistingVolumes + 1, getNumVolumes());
when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
Set<File> volumesToRemove = new HashSet<>();
volumesToRemove.add(loc.getFile());
dataset.removeVolumes(volumesToRemove, true);
assertEquals(numExistingVolumes, getNumVolumes());
}
@Test(timeout = 5000)
public void testChangeVolumeWithRunningCheckDirs() throws IOException {
RoundRobinVolumeChoosingPolicy<FsVolumeImpl> blockChooser =
new RoundRobinVolumeChoosingPolicy<>();
conf.setLong(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
final BlockScanner blockScanner = new BlockScanner(datanode, conf);
final FsVolumeList volumeList = new FsVolumeList(
Collections.<VolumeFailureInfo>emptyList(), blockScanner, blockChooser);
final List<FsVolumeImpl> oldVolumes = new ArrayList<>();
// Initialize FsVolumeList with 5 mock volumes.
final int NUM_VOLUMES = 5;
for (int i = 0; i < NUM_VOLUMES; i++) {
FsVolumeImpl volume = mock(FsVolumeImpl.class);
oldVolumes.add(volume);
when(volume.getBasePath()).thenReturn("data" + i);
FsVolumeReference ref = mock(FsVolumeReference.class);
when(ref.getVolume()).thenReturn(volume);
volumeList.addVolume(ref);
}
// When checkDirs() is called on the 2nd volume, another "thread" removes the
// 5th volume and adds a new volume. This does not affect the running checkDirs().
final FsVolumeImpl newVolume = mock(FsVolumeImpl.class);
final FsVolumeReference newRef = mock(FsVolumeReference.class);
when(newRef.getVolume()).thenReturn(newVolume);
when(newVolume.getBasePath()).thenReturn("data4");
FsVolumeImpl blockedVolume = volumeList.getVolumes().get(1);
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocationOnMock)
throws Throwable {
volumeList.removeVolume(new File("data4"), false);
volumeList.addVolume(newRef);
return null;
}
}).when(blockedVolume).checkDirs();
FsVolumeImpl brokenVolume = volumeList.getVolumes().get(2);
doThrow(new DiskChecker.DiskErrorException("broken"))
.when(brokenVolume).checkDirs();
volumeList.checkDirs();
// Since the checkDirs() call takes a snapshot of the list of volumes before
// running removeVolume(), it is supposed to run checkDirs() on all of the
// old volumes.
for (FsVolumeImpl volume : oldVolumes) {
verify(volume).checkDirs();
}
// New volume is not visible to checkDirs() process.
verify(newVolume, never()).checkDirs();
assertTrue(volumeList.getVolumes().contains(newVolume));
assertFalse(volumeList.getVolumes().contains(brokenVolume));
assertEquals(NUM_VOLUMES - 1, volumeList.getVolumes().size());
}
@Test
public void testAddVolumeFailureReleasesInUseLock() throws IOException {
FsDatasetImpl spyDataset = spy(dataset);
FsVolumeImpl mockVolume = mock(FsVolumeImpl.class);
File badDir = new File(BASE_DIR, "bad");
badDir.mkdirs();
doReturn(mockVolume).when(spyDataset)
.createFsVolume(anyString(), any(File.class), any(StorageType.class));
doThrow(new IOException("Failed to getVolumeMap()"))
.when(mockVolume).getVolumeMap(
anyString(),
any(ReplicaMap.class),
any(RamDiskReplicaLruTracker.class));
Storage.StorageDirectory sd = createStorageDirectory(badDir);
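// Lock the storage directory up front so the test can verify that the lock
// is released after the failed addVolume() call below.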
sd.lock();
DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
when(storage.prepareVolume(eq(datanode), eq(badDir.getAbsoluteFile()),
Matchers.<List<NamespaceInfo>>any()))
.thenReturn(builder);
StorageLocation location = StorageLocation.parse(badDir.toString());
List<NamespaceInfo> nsInfos = Lists.newArrayList();
for (String bpid : BLOCK_POOL_IDS) {
nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
}
try {
spyDataset.addVolume(location, nsInfos);
fail("Expect to throw MultipleIOException");
} catch (MultipleIOException e) {
}
FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
}
@Test
public void testDeletingBlocks() throws IOException {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
try {
cluster.waitActive();
DataNode dn = cluster.getDataNodes().get(0);
FsDatasetImpl ds = (FsDatasetImpl) DataNodeTestUtils.getFSDataset(dn);
FsVolumeImpl vol;
try (FsDatasetSpi.FsVolumeReferences volumes = ds.getFsVolumeReferences()) {
vol = (FsVolumeImpl)volumes.get(0);
}
ExtendedBlock eb;
ReplicaInfo info;
List<Block> blockList = new ArrayList<Block>();
for (int i = 1; i <= 63; i++) {
eb = new ExtendedBlock(BLOCKPOOL, i, 1, 1000 + i);
info = new FinalizedReplica(
eb.getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
ds.volumeMap.add(BLOCKPOOL, info);
info.getBlockFile().createNewFile();
info.getMetaFile().createNewFile();
blockList.add(info);
}
ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
// Nothing to do
}
assertTrue(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
blockList.clear();
eb = new ExtendedBlock(BLOCKPOOL, 64, 1, 1064);
info = new FinalizedReplica(
eb.getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
ds.volumeMap.add(BLOCKPOOL, info);
info.getBlockFile().createNewFile();
info.getMetaFile().createNewFile();
blockList.add(info);
ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
// Nothing to do
}
assertFalse(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
} finally {
cluster.shutdown();
}
}
@Test
public void testDuplicateReplicaResolution() throws IOException {
FsVolumeImpl fsv1 = Mockito.mock(FsVolumeImpl.class);
FsVolumeImpl fsv2 = Mockito.mock(FsVolumeImpl.class);
File f1 = new File("d1/block");
File f2 = new File("d2/block");
ReplicaInfo replicaOlder = new FinalizedReplica(1,1,1,fsv1,f1);
ReplicaInfo replica = new FinalizedReplica(1,2,2,fsv1,f1);
ReplicaInfo replicaSame = new FinalizedReplica(1,2,2,fsv1,f1);
ReplicaInfo replicaNewer = new FinalizedReplica(1,3,3,fsv1,f1);
ReplicaInfo replicaOtherOlder = new FinalizedReplica(1,1,1,fsv2,f2);
ReplicaInfo replicaOtherSame = new FinalizedReplica(1,2,2,fsv2,f2);
ReplicaInfo replicaOtherNewer = new FinalizedReplica(1,3,3,fsv2,f2);
// equivalent path so don't remove either
assertNull(BlockPoolSlice.selectReplicaToDelete(replicaSame, replica));
assertNull(BlockPoolSlice.selectReplicaToDelete(replicaOlder, replica));
assertNull(BlockPoolSlice.selectReplicaToDelete(replicaNewer, replica));
// keep latest found replica
assertSame(replica,
BlockPoolSlice.selectReplicaToDelete(replicaOtherSame, replica));
assertSame(replicaOtherOlder,
BlockPoolSlice.selectReplicaToDelete(replicaOtherOlder, replica));
assertSame(replica,
BlockPoolSlice.selectReplicaToDelete(replicaOtherNewer, replica));
}
}
| 18,105 | 39.235556 | 89 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import com.google.common.collect.Iterators;
import com.google.common.util.concurrent.Uninterruptibles;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Test;
import java.io.IOException;
import java.util.EnumSet;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.apache.hadoop.fs.StorageType.RAM_DISK;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
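/**
 * Tests for lazy-persist (LAZY_PERSIST) files: append and truncate are
 * denied, replicas lost with a dead DataNode are scrubbed unless the
 * scrubber is disabled or the NameNode restarts, and concurrent reads and
 * writes work as expected.
 */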
public class TestLazyPersistFiles extends LazyPersistTestCase {
private static final int THREADPOOL_SIZE = 10;
/**
* Append to lazy persist file is denied.
* @throws IOException
*/
@Test
public void testAppendIsDenied() throws IOException {
getClusterBuilder().build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, BLOCK_SIZE, true);
try {
client.append(path.toString(), BUFFER_LENGTH,
EnumSet.of(CreateFlag.APPEND), null, null).close();
fail("Append to LazyPersist file did not fail as expected");
} catch (Throwable t) {
LOG.info("Got expected exception ", t);
}
}
/**
* Truncate to lazy persist file is denied.
* @throws IOException
*/
@Test
public void testTruncateIsDenied() throws IOException {
getClusterBuilder().build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, BLOCK_SIZE, true);
try {
client.truncate(path.toString(), BLOCK_SIZE/2);
fail("Truncate to LazyPersist file did not fail as expected");
} catch (Throwable t) {
LOG.info("Got expected exception ", t);
}
}
/**
* If one or more replicas of a lazyPersist file are lost, then the file
* must be discarded by the NN, instead of being kept around as a
* 'corrupt' file.
*/
@Test
public void testCorruptFilesAreDiscarded()
throws IOException, InterruptedException {
getClusterBuilder().setRamDiskReplicaCapacity(2).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
makeTestFile(path1, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
// Stop the DataNode and sleep for the time it takes the NN to
// detect the DN as being dead.
cluster.shutdownDataNodes();
Thread.sleep(30000L);
assertThat(cluster.getNamesystem().getNumDeadDataNodes(), is(1));
// Next, wait for the replication monitor to mark the file as corrupt
Thread.sleep(2 * DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT * 1000);
// Wait for the LazyPersistFileScrubber to run
Thread.sleep(2 * LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC * 1000);
// Ensure that path1 does not exist anymore.
assert(!fs.exists(path1));
// There should be no blocks left that need replication.
assertThat(cluster.getNameNode()
.getNamesystem()
.getBlockManager()
.getUnderReplicatedBlocksCount(),
is(0L));
}
@Test
public void testDisableLazyPersistFileScrubber()
throws IOException, InterruptedException {
getClusterBuilder().setRamDiskReplicaCapacity(2).disableScrubber().build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
makeTestFile(path1, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
// Stop the DataNode and sleep for the time it takes the NN to
// detect the DN as being dead.
cluster.shutdownDataNodes();
Thread.sleep(30000L);
// Next, wait for the replication monitor to mark the file as corrupt
Thread.sleep(2 * DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT * 1000);
// Wait for the LazyPersistFileScrubber to run
Thread.sleep(2 * LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC * 1000);
// Ensure that path1 still exists.
Assert.assertTrue(fs.exists(path1));
}
/**
* If the NN is restarted, lazyPersist files should not be deleted.
*/
@Test
public void testFileShouldNotDiscardedIfNNRestarted() throws IOException,
InterruptedException {
getClusterBuilder().setRamDiskReplicaCapacity(2).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
makeTestFile(path1, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
cluster.shutdownDataNodes();
cluster.restartNameNodes();
// wait for the replication monitor to mark the file as corrupt
Thread.sleep(2 * DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT * 1000);
Long corruptBlkCount = (long) Iterators.size(cluster.getNameNode()
.getNamesystem().getBlockManager().getCorruptReplicaBlockIterator());
// Check block detected as corrupted
assertThat(corruptBlkCount, is(1L));
// Ensure that path1 still exists.
Assert.assertTrue(fs.exists(path1));
}
/**
* Concurrent read from the same node and verify the contents.
*/
@Test
public void testConcurrentRead()
throws Exception {
getClusterBuilder().setRamDiskReplicaCapacity(2).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
final Path path1 = new Path("/" + METHOD_NAME + ".dat");
final int SEED = 0xFADED;
final int NUM_TASKS = 5;
makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
//Read from multiple clients
final CountDownLatch latch = new CountDownLatch(NUM_TASKS);
final AtomicBoolean testFailed = new AtomicBoolean(false);
Runnable readerRunnable = new Runnable() {
@Override
public void run() {
try {
Assert.assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));
} catch (Throwable e) {
LOG.error("readerRunnable error", e);
testFailed.set(true);
} finally {
latch.countDown();
}
}
};
Thread threads[] = new Thread[NUM_TASKS];
for (int i = 0; i < NUM_TASKS; i++) {
threads[i] = new Thread(readerRunnable);
threads[i].start();
}
Thread.sleep(500);
for (int i = 0; i < NUM_TASKS; i++) {
Uninterruptibles.joinUninterruptibly(threads[i]);
}
Assert.assertFalse(testFailed.get());
}
/**
* Concurrent write with eviction
* RAM_DISK can hold 9 replicas
* 4 threads each write 5 replicas
* @throws IOException
* @throws InterruptedException
*/
@Test
public void testConcurrentWrites()
throws IOException, InterruptedException {
getClusterBuilder().setRamDiskReplicaCapacity(9).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
final int SEED = 0xFADED;
final int NUM_WRITERS = 4;
final int NUM_WRITER_PATHS = 5;
Path paths[][] = new Path[NUM_WRITERS][NUM_WRITER_PATHS];
for (int i = 0; i < NUM_WRITERS; i++) {
paths[i] = new Path[NUM_WRITER_PATHS];
for (int j = 0; j < NUM_WRITER_PATHS; j++) {
paths[i][j] =
new Path("/" + METHOD_NAME + ".Writer" + i + ".File." + j + ".dat");
}
}
final CountDownLatch latch = new CountDownLatch(NUM_WRITERS);
final AtomicBoolean testFailed = new AtomicBoolean(false);
ExecutorService executor = Executors.newFixedThreadPool(THREADPOOL_SIZE);
for (int i = 0; i < NUM_WRITERS; i++) {
Runnable writer = new WriterRunnable(i, paths[i], SEED, latch, testFailed);
executor.execute(writer);
}
Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
triggerBlockReport();
// Wait for all writer threads to finish.
latch.await();
assertThat(testFailed.get(), is(false));
}
class WriterRunnable implements Runnable {
private final int id;
private final Path paths[];
private final int seed;
private CountDownLatch latch;
private AtomicBoolean bFail;
public WriterRunnable(int threadIndex, Path[] paths,
int seed, CountDownLatch latch,
AtomicBoolean bFail) {
id = threadIndex;
this.paths = paths;
this.seed = seed;
this.latch = latch;
this.bFail = bFail;
System.out.println("Creating Writer: " + id);
}
public void run() {
System.out.println("Writer " + id + " starting... ");
int i = 0;
try {
for (i = 0; i < paths.length; i++) {
makeRandomTestFile(paths[i], BLOCK_SIZE, true, seed);
// eviction may fail when not all blocks are persisted yet.
// ensureFileReplicasOnStorageType(paths[i], RAM_DISK);
}
} catch (IOException e) {
bFail.set(true);
LOG.error("Writer exception: writer id:" + id +
" testfile: " + paths[i].toString() +
" " + e);
} finally {
latch.countDown();
}
}
}
}
| 10,216 | 32.28013 | 81 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNull;
import static org.mockito.Mockito.mock;
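/**
 * Tests for FsVolumeList: closed volumes are skipped by getNextVolume() and
 * checkDirs(), and the volume reference is released when no block scanner is
 * configured.
 */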
public class TestFsVolumeList {
private final Configuration conf = new Configuration();
private VolumeChoosingPolicy<FsVolumeImpl> blockChooser =
new RoundRobinVolumeChoosingPolicy<>();
private FsDatasetImpl dataset = null;
private String baseDir;
private BlockScanner blockScanner;
@Before
public void setUp() {
dataset = mock(FsDatasetImpl.class);
baseDir = new FileSystemTestHelper().getTestRootDir();
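    // A negative scan period disables periodic block scanning so it does not
    // interfere with these volume tests.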
Configuration blockScannerConf = new Configuration();
blockScannerConf.setInt(DFSConfigKeys.
DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
blockScanner = new BlockScanner(null, blockScannerConf);
}
@Test
public void testGetNextVolumeWithClosedVolume() throws IOException {
FsVolumeList volumeList = new FsVolumeList(
Collections.<VolumeFailureInfo>emptyList(), blockScanner, blockChooser);
List<FsVolumeImpl> volumes = new ArrayList<>();
for (int i = 0; i < 3; i++) {
File curDir = new File(baseDir, "nextvolume-" + i);
curDir.mkdirs();
FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", curDir,
conf, StorageType.DEFAULT);
volume.setCapacityForTesting(1024 * 1024 * 1024);
volumes.add(volume);
volumeList.addVolume(volume.obtainReference());
}
// Close the second volume.
volumes.get(1).closeAndWait();
for (int i = 0; i < 10; i++) {
try (FsVolumeReference ref =
volumeList.getNextVolume(StorageType.DEFAULT, 128)) {
// volume No.2 will not be chosen.
assertNotEquals(ref.getVolume(), volumes.get(1));
}
}
}
@Test
public void testCheckDirsWithClosedVolume() throws IOException {
FsVolumeList volumeList = new FsVolumeList(
Collections.<VolumeFailureInfo>emptyList(), blockScanner, blockChooser);
List<FsVolumeImpl> volumes = new ArrayList<>();
for (int i = 0; i < 3; i++) {
File curDir = new File(baseDir, "volume-" + i);
curDir.mkdirs();
FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", curDir,
conf, StorageType.DEFAULT);
volumes.add(volume);
volumeList.addVolume(volume.obtainReference());
}
// Close the 2nd volume.
volumes.get(1).closeAndWait();
// checkDirs() should ignore the 2nd volume since it is closed.
volumeList.checkDirs();
}
@Test
public void testReleaseVolumeRefIfNoBlockScanner() throws IOException {
FsVolumeList volumeList = new FsVolumeList(
Collections.<VolumeFailureInfo>emptyList(), null, blockChooser);
File volDir = new File(baseDir, "volume-0");
volDir.mkdirs();
FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir,
conf, StorageType.DEFAULT);
FsVolumeReference ref = volume.obtainReference();
volumeList.addVolume(ref);
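    // With no BlockScanner configured, addVolume() releases the reference,
    // so it should no longer point at a volume.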
assertNull(ref.getVolume());
}
}
| 4,511 | 36.915966 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import com.google.common.base.Supplier;
import org.apache.commons.lang.UnhandledException;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import static org.apache.hadoop.fs.CreateFlag.CREATE;
import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST;
import static org.apache.hadoop.fs.StorageType.DEFAULT;
import static org.apache.hadoop.fs.StorageType.RAM_DISK;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
import com.google.common.base.Preconditions;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.tools.JMXGet;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Rule;
import org.junit.rules.Timeout;
public abstract class LazyPersistTestCase {
static final byte LAZY_PERSIST_POLICY_ID = (byte) 15;
static {
DFSTestUtil.setNameNodeLogLevel(Level.DEBUG);
GenericTestUtils.setLogLevel(FsDatasetImpl.LOG, Level.DEBUG);
}
protected static final int BLOCK_SIZE = 5 * 1024 * 1024;
protected static final int BUFFER_LENGTH = 4096;
private static final long HEARTBEAT_INTERVAL_SEC = 1;
private static final int HEARTBEAT_RECHECK_INTERVAL_MSEC = 500;
private static final String JMX_RAM_DISK_METRICS_PATTERN = "^RamDisk";
private static final String JMX_SERVICE_NAME = "DataNode";
protected static final int LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC = 3;
protected static final int LAZY_WRITER_INTERVAL_SEC = 1;
protected static final Log LOG = LogFactory.getLog(LazyPersistTestCase.class);
protected static final short REPL_FACTOR = 1;
protected final long osPageSize =
NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();
protected MiniDFSCluster cluster;
protected DistributedFileSystem fs;
protected DFSClient client;
protected JMXGet jmx;
protected TemporarySocketDirectory sockDir;
@After
public void shutDownCluster() throws Exception {
    // Dump all RamDisk JMX metrics before shutting down the cluster
printRamDiskJMXMetrics();
if (fs != null) {
fs.close();
fs = null;
client = null;
}
if (cluster != null) {
cluster.shutdownDataNodes();
cluster.shutdown();
cluster = null;
}
    jmx = null;
IOUtils.closeQuietly(sockDir);
sockDir = null;
}
@Rule
public Timeout timeout = new Timeout(300000);
protected final LocatedBlocks ensureFileReplicasOnStorageType(
Path path, StorageType storageType) throws IOException {
    // Ensure that the returned block locations are correct!
LOG.info("Ensure path: " + path + " is on StorageType: " + storageType);
assertThat(fs.exists(path), is(true));
long fileLength = client.getFileInfo(path.toString()).getLen();
LocatedBlocks locatedBlocks =
client.getLocatedBlocks(path.toString(), 0, fileLength);
for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
assertThat(locatedBlock.getStorageTypes()[0], is(storageType));
}
return locatedBlocks;
}
/**
* Make sure at least one non-transient volume has a saved copy of the replica.
* An infinite loop is used to ensure the async lazy persist tasks are completely
* done before verification. Caller of ensureLazyPersistBlocksAreSaved expects
* either a successful pass or timeout failure.
*/
protected final void ensureLazyPersistBlocksAreSaved(
LocatedBlocks locatedBlocks) throws IOException, InterruptedException {
final String bpid = cluster.getNamesystem().getBlockPoolId();
final Set<Long> persistedBlockIds = new HashSet<Long>();
try (FsDatasetSpi.FsVolumeReferences volumes =
cluster.getDataNodes().get(0).getFSDataset().getFsVolumeReferences()) {
while (persistedBlockIds.size() < locatedBlocks.getLocatedBlocks()
.size()) {
// Take 1 second sleep before each verification iteration
Thread.sleep(1000);
for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
for (FsVolumeSpi v : volumes) {
if (v.isTransientStorage()) {
continue;
}
FsVolumeImpl volume = (FsVolumeImpl) v;
File lazyPersistDir =
volume.getBlockPoolSlice(bpid).getLazypersistDir();
long blockId = lb.getBlock().getBlockId();
File targetDir =
DatanodeUtil.idToBlockDir(lazyPersistDir, blockId);
File blockFile = new File(targetDir, lb.getBlock().getBlockName());
if (blockFile.exists()) {
// Found a persisted copy for this block and added to the Set
persistedBlockIds.add(blockId);
}
}
}
}
}
// We should have found a persisted copy for each located block.
assertThat(persistedBlockIds.size(), is(locatedBlocks.getLocatedBlocks().size()));
}
protected final void makeRandomTestFile(Path path, long length,
boolean isLazyPersist, long seed) throws IOException {
DFSTestUtil.createFile(fs, path, isLazyPersist, BUFFER_LENGTH, length,
BLOCK_SIZE, REPL_FACTOR, seed, true);
}
protected final void makeTestFile(Path path, long length,
boolean isLazyPersist) throws IOException {
EnumSet<CreateFlag> createFlags = EnumSet.of(CREATE);
if (isLazyPersist) {
createFlags.add(LAZY_PERSIST);
}
FSDataOutputStream fos = null;
try {
fos =
fs.create(path,
FsPermission.getFileDefault(),
createFlags,
BUFFER_LENGTH,
REPL_FACTOR,
BLOCK_SIZE,
null);
// Allocate a block.
byte[] buffer = new byte[BUFFER_LENGTH];
for (int bytesWritten = 0; bytesWritten < length; ) {
fos.write(buffer, 0, buffer.length);
bytesWritten += buffer.length;
}
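      // Sync so the written data reaches the DataNode before the stream is
      // closed.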
if (length > 0) {
fos.hsync();
}
} finally {
IOUtils.closeQuietly(fos);
}
}
/**
* If ramDiskStorageLimit is >=0, then RAM_DISK capacity is artificially
* capped. If ramDiskStorageLimit < 0 then it is ignored.
*/
protected final void startUpCluster(
int numDatanodes,
boolean hasTransientStorage,
StorageType[] storageTypes,
int ramDiskReplicaCapacity,
long ramDiskStorageLimit,
long maxLockedMemory,
boolean useSCR,
boolean useLegacyBlockReaderLocal,
boolean disableScrubber) throws IOException {
initCacheManipulator();
Configuration conf = new Configuration();
conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
if (disableScrubber) {
conf.setInt(DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC, 0);
} else {
conf.setInt(DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC,
LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC);
}
conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL_SEC);
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
HEARTBEAT_RECHECK_INTERVAL_MSEC);
conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
LAZY_WRITER_INTERVAL_SEC);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, maxLockedMemory);
if (useSCR) {
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
// Do not share a client context across tests.
conf.set(DFS_CLIENT_CONTEXT, UUID.randomUUID().toString());
conf.set(DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
UserGroupInformation.getCurrentUser().getShortUserName());
if (useLegacyBlockReaderLocal) {
conf.setBoolean(DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
} else {
sockDir = new TemporarySocketDirectory();
conf.set(DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(),
this.getClass().getSimpleName() + "._PORT.sock").getAbsolutePath());
}
}
Preconditions.checkState(
ramDiskReplicaCapacity < 0 || ramDiskStorageLimit < 0,
"Cannot specify non-default values for both ramDiskReplicaCapacity "
+ "and ramDiskStorageLimit");
long[] capacities;
if (hasTransientStorage && ramDiskReplicaCapacity >= 0) {
// Convert replica count to byte count, add some delta for .meta and
// VERSION files.
ramDiskStorageLimit = ((long) ramDiskReplicaCapacity * BLOCK_SIZE) +
(BLOCK_SIZE - 1);
}
capacities = new long[] { ramDiskStorageLimit, -1 };
cluster = new MiniDFSCluster
.Builder(conf)
.numDataNodes(numDatanodes)
.storageCapacities(capacities)
.storageTypes(storageTypes != null ? storageTypes :
(hasTransientStorage ? new StorageType[]{RAM_DISK, DEFAULT} : null))
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
client = fs.getClient();
try {
jmx = initJMX();
} catch (Exception e) {
fail("Failed initialize JMX for testing: " + e);
}
LOG.info("Cluster startup complete");
}
/**
* Use a dummy cache manipulator for testing.
*/
public static void initCacheManipulator() {
NativeIO.POSIX.setCacheManipulator(new NativeIO.POSIX.CacheManipulator() {
@Override
public void mlock(String identifier,
ByteBuffer mmap, long length) throws IOException {
LOG.info("LazyPersistTestCase: faking mlock of " + identifier + " bytes.");
}
@Override
public long getMemlockLimit() {
LOG.info("LazyPersistTestCase: fake return " + Long.MAX_VALUE);
return Long.MAX_VALUE;
}
@Override
public boolean verifyCanMlock() {
LOG.info("LazyPersistTestCase: fake return " + true);
return true;
}
});
}
ClusterWithRamDiskBuilder getClusterBuilder() {
return new ClusterWithRamDiskBuilder();
}
/**
* Builder class that allows controlling RAM disk-specific properties for a
* MiniDFSCluster.
*/
class ClusterWithRamDiskBuilder {
public ClusterWithRamDiskBuilder setNumDatanodes(
int numDatanodes) {
this.numDatanodes = numDatanodes;
return this;
}
public ClusterWithRamDiskBuilder setStorageTypes(
StorageType[] storageTypes) {
this.storageTypes = storageTypes;
return this;
}
public ClusterWithRamDiskBuilder setRamDiskReplicaCapacity(
int ramDiskReplicaCapacity) {
this.ramDiskReplicaCapacity = ramDiskReplicaCapacity;
return this;
}
public ClusterWithRamDiskBuilder setRamDiskStorageLimit(
long ramDiskStorageLimit) {
this.ramDiskStorageLimit = ramDiskStorageLimit;
return this;
}
public ClusterWithRamDiskBuilder setMaxLockedMemory(long maxLockedMemory) {
this.maxLockedMemory = maxLockedMemory;
return this;
}
public ClusterWithRamDiskBuilder setUseScr(boolean useScr) {
this.useScr = useScr;
return this;
}
public ClusterWithRamDiskBuilder setHasTransientStorage(
boolean hasTransientStorage) {
this.hasTransientStorage = hasTransientStorage;
return this;
}
public ClusterWithRamDiskBuilder setUseLegacyBlockReaderLocal(
boolean useLegacyBlockReaderLocal) {
this.useLegacyBlockReaderLocal = useLegacyBlockReaderLocal;
return this;
}
public ClusterWithRamDiskBuilder disableScrubber() {
this.disableScrubber = true;
return this;
}
public void build() throws IOException {
LazyPersistTestCase.this.startUpCluster(
numDatanodes, hasTransientStorage, storageTypes, ramDiskReplicaCapacity,
ramDiskStorageLimit, maxLockedMemory, useScr, useLegacyBlockReaderLocal,
disableScrubber);
}
private int numDatanodes = REPL_FACTOR;
private StorageType[] storageTypes = null;
private int ramDiskReplicaCapacity = -1;
private long ramDiskStorageLimit = -1;
private long maxLockedMemory = Long.MAX_VALUE;
private boolean hasTransientStorage = true;
private boolean useScr = false;
private boolean useLegacyBlockReaderLocal = false;
private boolean disableScrubber=false;
}
protected final void triggerBlockReport()
throws IOException, InterruptedException {
// Trigger block report to NN
DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(0));
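    // Give the NameNode time to process the block report.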
Thread.sleep(10 * 1000);
}
protected final boolean verifyBlockDeletedFromDir(File dir,
LocatedBlocks locatedBlocks) {
for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
File targetDir =
DatanodeUtil.idToBlockDir(dir, lb.getBlock().getBlockId());
File blockFile = new File(targetDir, lb.getBlock().getBlockName());
if (blockFile.exists()) {
LOG.warn("blockFile: " + blockFile.getAbsolutePath() +
" exists after deletion.");
return false;
}
File metaFile = new File(targetDir,
DatanodeUtil.getMetaName(lb.getBlock().getBlockName(),
lb.getBlock().getGenerationStamp()));
if (metaFile.exists()) {
LOG.warn("metaFile: " + metaFile.getAbsolutePath() +
" exists after deletion.");
return false;
}
}
return true;
}
protected final boolean verifyDeletedBlocks(LocatedBlocks locatedBlocks)
throws IOException, InterruptedException {
LOG.info("Verifying replica has no saved copy after deletion.");
triggerBlockReport();
while(
DataNodeTestUtils.getPendingAsyncDeletions(cluster.getDataNodes().get(0))
> 0L){
Thread.sleep(1000);
}
final String bpid = cluster.getNamesystem().getBlockPoolId();
final FsDatasetSpi<?> dataset =
cluster.getDataNodes().get(0).getFSDataset();
// Make sure deleted replica does not have a copy on either finalized dir of
// transient volume or finalized dir of non-transient volume
try (FsDatasetSpi.FsVolumeReferences volumes =
dataset.getFsVolumeReferences()) {
for (FsVolumeSpi vol : volumes) {
FsVolumeImpl volume = (FsVolumeImpl) vol;
File targetDir = (volume.isTransientStorage()) ?
volume.getBlockPoolSlice(bpid).getFinalizedDir() :
volume.getBlockPoolSlice(bpid).getLazypersistDir();
        if (!verifyBlockDeletedFromDir(targetDir, locatedBlocks)) {
return false;
}
}
}
return true;
}
protected final void verifyRamDiskJMXMetric(String metricName,
long expectedValue) throws Exception {
assertEquals(expectedValue, Integer.parseInt(jmx.getValue(metricName)));
}
protected final boolean verifyReadRandomFile(
Path path, int fileLength, int seed) throws IOException {
byte contents[] = DFSTestUtil.readFileBuffer(fs, path);
byte expected[] = DFSTestUtil.
calculateFileContentsFromSeed(seed, fileLength);
return Arrays.equals(contents, expected);
}
private JMXGet initJMX() throws Exception {
JMXGet jmx = new JMXGet();
jmx.setService(JMX_SERVICE_NAME);
jmx.init();
return jmx;
}
private void printRamDiskJMXMetrics() {
try {
if (jmx != null) {
jmx.printAllMatchedAttributes(JMX_RAM_DISK_METRICS_PATTERN);
}
} catch (Exception e) {
e.printStackTrace();
}
}
protected void waitForMetric(final String metricName, final int expectedValue)
throws TimeoutException, InterruptedException {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
final int currentValue = Integer.parseInt(jmx.getValue(metricName));
LOG.info("Waiting for " + metricName +
" to reach value " + expectedValue +
", current value = " + currentValue);
return currentValue == expectedValue;
} catch (Exception e) {
throw new UnhandledException("Test failed due to unexpected exception", e);
}
}
}, 1000, Integer.MAX_VALUE);
}
protected void triggerEviction(DataNode dn) {
FsDatasetImpl fsDataset = (FsDatasetImpl) dn.getFSDataset();
fsDataset.evictLazyPersistBlocks(Long.MAX_VALUE); // Run one eviction cycle.
}
}
| 18,700 | 34.086304 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Collection;
import java.util.Iterator;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.io.IOUtils;
import org.junit.Assert;
import org.junit.Test;
/** Test that a datanode can correctly recover its replicas across restarts. */
public class TestDatanodeRestart {
// test finalized replicas persist across DataNode restarts
@Test public void testFinalizedReplicas() throws Exception {
// bring up a cluster of 3
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
try {
// test finalized replicas
final String TopDir = "/test";
DFSTestUtil util = new DFSTestUtil.Builder().
setName("TestDatanodeRestart").setNumFiles(2).build();
util.createFiles(fs, TopDir, (short)3);
util.waitReplication(fs, TopDir, (short)3);
util.checkFiles(fs, TopDir);
cluster.restartDataNodes();
cluster.waitActive();
util.checkFiles(fs, TopDir);
} finally {
cluster.shutdown();
}
}
// test rbw replicas persist across DataNode restarts
  @Test public void testRbwReplicas() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.waitActive();
try {
testRbwReplicas(cluster, false);
testRbwReplicas(cluster, true);
} finally {
cluster.shutdown();
}
}
private void testRbwReplicas(MiniDFSCluster cluster, boolean isCorrupt)
throws IOException {
FSDataOutputStream out = null;
FileSystem fs = cluster.getFileSystem();
final Path src = new Path("/test.txt");
try {
final int fileLen = 515;
// create some rbw replicas on disk
byte[] writeBuf = new byte[fileLen];
new Random().nextBytes(writeBuf);
out = fs.create(src);
out.write(writeBuf);
out.hflush();
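      // hflush pushes the data to the DataNode but leaves the replica in the
      // rbw directory since the file is still open.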
DataNode dn = cluster.getDataNodes().get(0);
try (FsDatasetSpi.FsVolumeReferences volumes =
dataset(dn).getFsVolumeReferences()) {
for (FsVolumeSpi vol : volumes) {
final FsVolumeImpl volume = (FsVolumeImpl) vol;
File currentDir =
volume.getCurrentDir().getParentFile().getParentFile();
File rbwDir = new File(currentDir, "rbw");
for (File file : rbwDir.listFiles()) {
if (isCorrupt && Block.isBlockFilename(file)) {
new RandomAccessFile(file, "rw")
.setLength(fileLen - 1); // corrupt
}
}
}
}
cluster.restartDataNodes();
cluster.waitActive();
dn = cluster.getDataNodes().get(0);
// check volumeMap: one rwr replica
String bpid = cluster.getNamesystem().getBlockPoolId();
ReplicaMap replicas = dataset(dn).volumeMap;
Assert.assertEquals(1, replicas.size(bpid));
ReplicaInfo replica = replicas.replicas(bpid).iterator().next();
Assert.assertEquals(ReplicaState.RWR, replica.getState());
if (isCorrupt) {
Assert.assertEquals((fileLen-1)/512*512, replica.getNumBytes());
} else {
Assert.assertEquals(fileLen, replica.getNumBytes());
}
dataset(dn).invalidate(bpid, new Block[]{replica});
} finally {
IOUtils.closeStream(out);
if (fs.exists(src)) {
fs.delete(src, false);
}
fs.close();
}
}
// test recovering unlinked tmp replicas
@Test public void testRecoverReplicas() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
try {
FileSystem fs = cluster.getFileSystem();
for (int i=0; i<4; i++) {
Path fileName = new Path("/test"+i);
DFSTestUtil.createFile(fs, fileName, 1, (short)1, 0L);
DFSTestUtil.waitReplication(fs, fileName, (short)1);
}
String bpid = cluster.getNamesystem().getBlockPoolId();
DataNode dn = cluster.getDataNodes().get(0);
Iterator<ReplicaInfo> replicasItor =
dataset(dn).volumeMap.replicas(bpid).iterator();
ReplicaInfo replica = replicasItor.next();
createUnlinkTmpFile(replica, true, true); // rename block file
createUnlinkTmpFile(replica, false, true); // rename meta file
replica = replicasItor.next();
createUnlinkTmpFile(replica, true, false); // copy block file
createUnlinkTmpFile(replica, false, false); // copy meta file
replica = replicasItor.next();
createUnlinkTmpFile(replica, true, true); // rename block file
createUnlinkTmpFile(replica, false, false); // copy meta file
cluster.restartDataNodes();
cluster.waitActive();
dn = cluster.getDataNodes().get(0);
// check volumeMap: 4 finalized replica
Collection<ReplicaInfo> replicas = dataset(dn).volumeMap.replicas(bpid);
Assert.assertEquals(4, replicas.size());
replicasItor = replicas.iterator();
while (replicasItor.hasNext()) {
Assert.assertEquals(ReplicaState.FINALIZED,
replicasItor.next().getState());
}
} finally {
cluster.shutdown();
}
}
private static FsDatasetImpl dataset(DataNode dn) {
return (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);
}
private static void createUnlinkTmpFile(ReplicaInfo replicaInfo,
boolean changeBlockFile,
boolean isRename) throws IOException {
File src;
if (changeBlockFile) {
src = replicaInfo.getBlockFile();
} else {
src = replicaInfo.getMetaFile();
}
File dst = DatanodeUtil.getUnlinkTmpFile(src);
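    // Leave a stale unlink tmp copy behind, either by renaming or by copying
    // the original file.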
if (isRename) {
src.renameTo(dst);
} else {
FileInputStream in = new FileInputStream(src);
try {
FileOutputStream out = new FileOutputStream(dst);
try {
IOUtils.copyBytes(in, out, 1);
} finally {
out.close();
}
} finally {
in.close();
}
}
}
}
| 8,320 | 36.651584 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistLockedMemory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import com.google.common.base.Supplier;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
import java.io.IOException;
import java.util.EnumSet;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.fs.CreateFlag.CREATE;
import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST;
import static org.apache.hadoop.fs.StorageType.DEFAULT;
import static org.apache.hadoop.fs.StorageType.RAM_DISK;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
/**
* Verify that locked memory is used correctly when writing to replicas in
* memory
*/
public class TestLazyPersistLockedMemory extends LazyPersistTestCase {
/**
* RAM disk present but locked memory is set to zero. Placement should
* fall back to disk.
*/
@Test
public void testWithNoLockedMemory() throws IOException {
getClusterBuilder().setNumDatanodes(1)
.setMaxLockedMemory(0).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path, DEFAULT);
}
@Test
public void testReservation()
throws IOException, TimeoutException, InterruptedException {
getClusterBuilder().setNumDatanodes(1)
.setMaxLockedMemory(BLOCK_SIZE).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
final FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
// Create a file and ensure the replica in RAM_DISK uses locked memory.
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path, RAM_DISK);
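    // One full block of locked memory should be reserved for the replica.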
assertThat(fsd.getCacheUsed(), is((long) BLOCK_SIZE));
}
@Test
public void testReleaseOnFileDeletion()
throws IOException, TimeoutException, InterruptedException {
getClusterBuilder().setNumDatanodes(1)
.setMaxLockedMemory(BLOCK_SIZE).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
final FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path, RAM_DISK);
assertThat(fsd.getCacheUsed(), is((long) BLOCK_SIZE));
// Delete the file and ensure that the locked memory is released.
fs.delete(path, false);
DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(0));
waitForLockedBytesUsed(fsd, 0);
}
/**
* Verify that locked RAM is released when blocks are evicted from RAM disk.
*/
@Test
public void testReleaseOnEviction() throws Exception {
getClusterBuilder().setNumDatanodes(1)
.setMaxLockedMemory(BLOCK_SIZE)
.setRamDiskReplicaCapacity(BLOCK_SIZE * 2 - 1)
.build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
final FsDatasetImpl fsd =
(FsDatasetImpl) cluster.getDataNodes().get(0).getFSDataset();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
makeTestFile(path1, BLOCK_SIZE, true);
assertThat(fsd.getCacheUsed(), is((long) BLOCK_SIZE));
// Wait until the replica is written to persistent storage.
waitForMetric("RamDiskBlocksLazyPersisted", 1);
// Trigger eviction and verify locked bytes were released.
fsd.evictLazyPersistBlocks(Long.MAX_VALUE);
verifyRamDiskJMXMetric("RamDiskBlocksEvicted", 1);
waitForLockedBytesUsed(fsd, 0);
}
/**
* Verify that locked bytes are correctly updated when a block is finalized
* at less than its max length.
*/
@Test
public void testShortBlockFinalized()
throws IOException, TimeoutException, InterruptedException {
getClusterBuilder().setNumDatanodes(1).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
final FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, 1, true);
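    // Even a one-byte replica reserves a full OS page of locked memory.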
assertThat(fsd.getCacheUsed(), is(osPageSize));
// Delete the file and ensure locked RAM usage goes to zero.
fs.delete(path, false);
waitForLockedBytesUsed(fsd, 0);
}
/**
* Verify that locked bytes are correctly updated when the client goes
* away unexpectedly during a write.
*/
@Test
public void testWritePipelineFailure()
throws IOException, TimeoutException, InterruptedException {
getClusterBuilder().setNumDatanodes(1).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
final FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
Path path = new Path("/" + METHOD_NAME + ".dat");
EnumSet<CreateFlag> createFlags = EnumSet.of(CREATE, LAZY_PERSIST);
// Write 1 byte to the file and kill the writer.
final FSDataOutputStream fos =
fs.create(path,
FsPermission.getFileDefault(),
createFlags,
BUFFER_LENGTH,
REPL_FACTOR,
BLOCK_SIZE,
null);
fos.write(new byte[1]);
fos.hsync();
DFSTestUtil.abortStream((DFSOutputStream) fos.getWrappedStream());
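    // The aborted replica should still hold one locked OS page until the
    // file is deleted.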
waitForLockedBytesUsed(fsd, osPageSize);
// Delete the file and ensure locked RAM goes to zero.
fs.delete(path, false);
DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(0));
waitForLockedBytesUsed(fsd, 0);
}
/**
* Wait until used locked byte count goes to the expected value.
* @throws TimeoutException after 300 seconds.
*/
private void waitForLockedBytesUsed(final FsDatasetSpi<?> fsd,
final long expectedLockedBytes)
throws TimeoutException, InterruptedException {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
long cacheUsed = fsd.getCacheUsed();
LOG.info("cacheUsed=" + cacheUsed + ", waiting for it to be " + expectedLockedBytes);
if (cacheUsed < 0) {
          throw new IllegalStateException("cacheUsed unexpectedly negative");
}
return (cacheUsed == expectedLockedBytes);
}
}, 1000, 300000);
}
}
| 7,611 | 36.870647 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketTimeoutException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClientAdapter;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.junit.Assert;
import org.junit.Test;
import static org.junit.Assume.assumeTrue;
/**
 * This tests InterDatanodeProtocol for block handling.
*/
public class TestInterDatanodeProtocol {
private static final String ADDRESS = "0.0.0.0";
final static private int PING_INTERVAL = 1000;
final static private int MIN_SLEEP_TIME = 1000;
private static final Configuration conf = new HdfsConfiguration();
private static class TestServer extends Server {
private boolean sleep;
private Class<? extends Writable> responseClass;
public TestServer(int handlerCount, boolean sleep) throws IOException {
this(handlerCount, sleep, LongWritable.class, null);
}
public TestServer(int handlerCount, boolean sleep,
Class<? extends Writable> paramClass,
Class<? extends Writable> responseClass)
throws IOException {
super(ADDRESS, 0, paramClass, handlerCount, conf);
this.sleep = sleep;
this.responseClass = responseClass;
}
@Override
public Writable call(RPC.RpcKind rpcKind, String protocol, Writable param, long receiveTime)
throws IOException {
if (sleep) {
// sleep a bit
try {
Thread.sleep(PING_INTERVAL + MIN_SLEEP_TIME);
} catch (InterruptedException e) {}
}
if (responseClass != null) {
try {
return responseClass.newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
} else {
return param; // echo param as result
}
}
}
public static void checkMetaInfo(ExtendedBlock b, DataNode dn) throws IOException {
Block metainfo = DataNodeTestUtils.getFSDataset(dn).getStoredBlock(
b.getBlockPoolId(), b.getBlockId());
Assert.assertEquals(b.getBlockId(), metainfo.getBlockId());
Assert.assertEquals(b.getNumBytes(), metainfo.getNumBytes());
}
public static LocatedBlock getLastLocatedBlock(
ClientProtocol namenode, String src) throws IOException {
//get block info for the last block
LocatedBlocks locations = namenode.getBlockLocations(src, 0, Long.MAX_VALUE);
List<LocatedBlock> blocks = locations.getLocatedBlocks();
DataNode.LOG.info("blocks.size()=" + blocks.size());
assertTrue(blocks.size() > 0);
return blocks.get(blocks.size() - 1);
}
  /** Test block metadata access via a DN. */
@Test
public void testBlockMetaDataInfo() throws Exception {
checkBlockMetaDataInfo(false);
}
/** The same as above, but use hostnames for DN<->DN communication */
@Test
public void testBlockMetaDataInfoWithHostname() throws Exception {
assumeTrue(System.getProperty("os.name").startsWith("Linux"));
checkBlockMetaDataInfo(true);
}
/**
* The following test first creates a file.
* It verifies the block information from a datanode.
* Then, it updates the block with new information and verifies again.
* @param useDnHostname whether DNs should connect to other DNs by hostname
*/
private void checkBlockMetaDataInfo(boolean useDnHostname) throws Exception {
MiniDFSCluster cluster = null;
conf.setBoolean(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, useDnHostname);
if (useDnHostname) {
// Since the mini cluster only listens on the loopback we have to
// ensure the hostname used to access DNs maps to the loopback. We
// do this by telling the DN to advertise localhost as its hostname
// instead of the default hostname.
conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
}
try {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3)
.checkDataNodeHostConfig(true)
.build();
cluster.waitActive();
//create a file
DistributedFileSystem dfs = cluster.getFileSystem();
String filestr = "/foo";
Path filepath = new Path(filestr);
DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
assertTrue(dfs.exists(filepath));
//get block info
LocatedBlock locatedblock = getLastLocatedBlock(
DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
assertTrue(datanodeinfo.length > 0);
//connect to a data node
DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
InterDatanodeProtocol idp = DataNodeTestUtils.createInterDatanodeProtocolProxy(
datanode, datanodeinfo[0], conf, useDnHostname);
// Stop the block scanners.
datanode.getBlockScanner().removeAllVolumeScanners();
//verify BlockMetaDataInfo
ExtendedBlock b = locatedblock.getBlock();
InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
checkMetaInfo(b, datanode);
long recoveryId = b.getGenerationStamp() + 1;
idp.initReplicaRecovery(
new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));
//verify updateBlock
ExtendedBlock newblock = new ExtendedBlock(b.getBlockPoolId(),
b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
idp.updateReplicaUnderRecovery(b, recoveryId, b.getBlockId(),
newblock.getNumBytes());
checkMetaInfo(newblock, datanode);
// Verify correct null response trying to init recovery for a missing block
ExtendedBlock badBlock = new ExtendedBlock("fake-pool",
b.getBlockId(), 0, 0);
assertNull(idp.initReplicaRecovery(
new RecoveringBlock(badBlock,
locatedblock.getLocations(), recoveryId)));
}
finally {
if (cluster != null) {cluster.shutdown();}
}
}
private static ReplicaInfo createReplicaInfo(Block b) {
return new FinalizedReplica(b, null, null);
}
private static void assertEquals(ReplicaInfo originalInfo, ReplicaRecoveryInfo recoveryInfo) {
Assert.assertEquals(originalInfo.getBlockId(), recoveryInfo.getBlockId());
Assert.assertEquals(originalInfo.getGenerationStamp(), recoveryInfo.getGenerationStamp());
Assert.assertEquals(originalInfo.getBytesOnDisk(), recoveryInfo.getNumBytes());
Assert.assertEquals(originalInfo.getState(), recoveryInfo.getOriginalReplicaState());
}
/** Test
* {@link FsDatasetImpl#initReplicaRecovery(String, ReplicaMap, Block, long, long)}
*/
@Test
public void testInitReplicaRecovery() throws IOException {
final long firstblockid = 10000L;
final long gs = 7777L;
final long length = 22L;
final ReplicaMap map = new ReplicaMap(this);
String bpid = "BP-TEST";
final Block[] blocks = new Block[5];
for(int i = 0; i < blocks.length; i++) {
blocks[i] = new Block(firstblockid + i, length, gs);
map.add(bpid, createReplicaInfo(blocks[i]));
}
{
//normal case
final Block b = blocks[0];
final ReplicaInfo originalInfo = map.get(bpid, b);
final long recoveryid = gs + 1;
final ReplicaRecoveryInfo recoveryInfo = FsDatasetImpl
.initReplicaRecovery(bpid, map, blocks[0], recoveryid,
DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
assertEquals(originalInfo, recoveryInfo);
final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery)map.get(bpid, b);
Assert.assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId());
Assert.assertEquals(recoveryid, updatedInfo.getRecoveryID());
//recover one more time
final long recoveryid2 = gs + 2;
final ReplicaRecoveryInfo recoveryInfo2 = FsDatasetImpl
.initReplicaRecovery(bpid, map, blocks[0], recoveryid2,
DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
assertEquals(originalInfo, recoveryInfo2);
final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery)map.get(bpid, b);
Assert.assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId());
Assert.assertEquals(recoveryid2, updatedInfo2.getRecoveryID());
//case RecoveryInProgressException
try {
FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
Assert.fail();
}
catch(RecoveryInProgressException ripe) {
System.out.println("GOOD: getting " + ripe);
}
}
{ // BlockRecoveryFI_01: replica not found
final long recoveryid = gs + 1;
final Block b = new Block(firstblockid - 1, length, gs);
ReplicaRecoveryInfo r = FsDatasetImpl.initReplicaRecovery(bpid, map, b,
recoveryid,
DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
Assert.assertNull("Data-node should not have this replica.", r);
}
{ // BlockRecoveryFI_02: "THIS IS NOT SUPPOSED TO HAPPEN" with recovery id < gs
final long recoveryid = gs - 1;
final Block b = new Block(firstblockid + 1, length, gs);
try {
FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
Assert.fail();
}
catch(IOException ioe) {
System.out.println("GOOD: getting " + ioe);
}
}
// BlockRecoveryFI_03: Replica's gs is less than the block's gs
{
final long recoveryid = gs + 1;
final Block b = new Block(firstblockid, length, gs+1);
try {
FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
fail("InitReplicaRecovery should fail because replica's " +
"gs is less than the block's gs");
} catch (IOException e) {
        Assert.assertTrue(e.getMessage().startsWith(
            "replica.getGenerationStamp() < block.getGenerationStamp(), block="));
}
}
}
/**
* Test for
* {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long)}
* */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
String bpid = cluster.getNamesystem().getBlockPoolId();
//create a file
DistributedFileSystem dfs = cluster.getFileSystem();
String filestr = "/foo";
Path filepath = new Path(filestr);
DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
//get block info
final LocatedBlock locatedblock = getLastLocatedBlock(
DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
Assert.assertTrue(datanodeinfo.length > 0);
//get DataNode and FSDataset objects
final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
Assert.assertTrue(datanode != null);
//initReplicaRecovery
final ExtendedBlock b = locatedblock.getBlock();
final long recoveryid = b.getGenerationStamp() + 1;
final long newlength = b.getNumBytes() - 1;
final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
new RecoveringBlock(b, null, recoveryid));
//check replica
final ReplicaInfo replica = FsDatasetTestUtil.fetchReplicaInfo(
fsdataset, bpid, b.getBlockId());
Assert.assertEquals(ReplicaState.RUR, replica.getState());
//check meta data before update
FsDatasetImpl.checkReplicaFiles(replica);
//case "THIS IS NOT SUPPOSED TO HAPPEN"
//with (block length) != (stored replica's on disk length).
{
//create a block with same id and gs but different length.
final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(), rri
.getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
try {
//update should fail
fsdataset.updateReplicaUnderRecovery(tmp, recoveryid,
tmp.getBlockId(), newlength);
Assert.fail();
} catch(IOException ioe) {
System.out.println("GOOD: getting " + ioe);
}
}
//update
final String storageID = fsdataset.updateReplicaUnderRecovery(
new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid,
rri.getBlockId(), newlength);
assertTrue(storageID != null);
} finally {
if (cluster != null) cluster.shutdown();
}
}
  /** Test to verify that InterDatanode RPC times out as expected when
* the server DN does not respond.
*/
@Test(expected=SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
final Server server = new TestServer(1, true);
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
InterDatanodeProtocol proxy = null;
try {
proxy = DataNode.createInterDataNodeProtocolProxy(
dInfo, conf, 500, false);
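      // The test server sleeps longer than the 500 ms RPC timeout, so this
      // call must time out.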
proxy.initReplicaRecovery(new RecoveringBlock(
new ExtendedBlock("bpid", 1), null, 100));
fail ("Expected SocketTimeoutException exception, but did not get.");
} finally {
if (proxy != null) {
RPC.stopProxy(proxy);
}
server.stop();
}
}
}
| 16,265 | 37.913876 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.junit.Before;
import org.junit.Test;
/**
* Unit test for ReplicasMap class
*/
public class TestReplicaMap {
private final ReplicaMap map = new ReplicaMap(TestReplicaMap.class);
private final String bpid = "BP-TEST";
private final Block block = new Block(1234, 1234, 1234);
@Before
public void setup() {
map.add(bpid, new FinalizedReplica(block, null, null));
}
/**
* Test for ReplicasMap.get(Block) and ReplicasMap.get(long) tests
*/
@Test
public void testGet() {
// Test 1: null argument throws invalid argument exception
try {
map.get(bpid, null);
fail("Expected exception not thrown");
} catch (IllegalArgumentException expected) { }
// Test 2: successful lookup based on block
assertNotNull(map.get(bpid, block));
// Test 3: Lookup failure - generation stamp mismatch
Block b = new Block(block);
b.setGenerationStamp(0);
assertNull(map.get(bpid, b));
// Test 4: Lookup failure - blockID mismatch
b.setGenerationStamp(block.getGenerationStamp());
b.setBlockId(0);
assertNull(map.get(bpid, b));
// Test 5: successful lookup based on block ID
assertNotNull(map.get(bpid, block.getBlockId()));
// Test 6: failed lookup for invalid block ID
assertNull(map.get(bpid, 0));
}
@Test
public void testAdd() {
// Test 1: null argument throws invalid argument exception
try {
map.add(bpid, null);
fail("Expected exception not thrown");
} catch (IllegalArgumentException expected) { }
}
@Test
public void testRemove() {
// Test 1: null argument throws invalid argument exception
try {
map.remove(bpid, null);
fail("Expected exception not thrown");
} catch (IllegalArgumentException expected) { }
// Test 2: remove failure - generation stamp mismatch
Block b = new Block(block);
b.setGenerationStamp(0);
assertNull(map.remove(bpid, b));
// Test 3: remove failure - blockID mismatch
b.setGenerationStamp(block.getGenerationStamp());
b.setBlockId(0);
assertNull(map.remove(bpid, b));
// Test 4: remove success
assertNotNull(map.remove(bpid, block));
// Test 5: remove failure - invalid blockID
assertNull(map.remove(bpid, 0));
// Test 6: remove success
map.add(bpid, new FinalizedReplica(block, null, null));
assertNotNull(map.remove(bpid, block.getBlockId()));
}
}
| 3,570 | 31.171171 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import com.google.common.base.Supplier;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.*;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Daemon;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.mockito.Mockito;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.management.ManagementFactory;
import java.lang.reflect.Field;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.TimeoutException;
import javax.management.MBeanServer;
import javax.management.ObjectName;
/**
 * Ensure that the DN reserves disk space equivalent to a full block for a
 * replica being written (RBW).
*/
public class TestRbwSpaceReservation {
static final Log LOG = LogFactory.getLog(TestRbwSpaceReservation.class);
private static final int DU_REFRESH_INTERVAL_MSEC = 500;
private static final int STORAGES_PER_DATANODE = 1;
private static final int BLOCK_SIZE = 1024 * 1024;
private static final int SMALL_BLOCK_SIZE = 1024;
protected MiniDFSCluster cluster;
private Configuration conf;
private DistributedFileSystem fs = null;
private DFSClient client = null;
FsVolumeReference singletonVolumeRef = null;
FsVolumeImpl singletonVolume = null;
private static Random rand = new Random();
private void initConfig(int blockSize) {
conf = new HdfsConfiguration();
// Refresh disk usage information frequently.
conf.setInt(FS_DU_INTERVAL_KEY, DU_REFRESH_INTERVAL_MSEC);
conf.setLong(DFS_BLOCK_SIZE_KEY, blockSize);
// Disable the scanner
conf.setInt(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
}
static {
((Log4JLogger) FsDatasetImpl.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
}
  /**
   * Start a cluster with the given block size, datanode count and per-volume
   * capacity limit.
   * @param blockSize block size to configure for the cluster
   * @param numDatanodes number of datanodes to start
   * @param perVolumeCapacity limit the capacity of each volume to the given
   *                          value. If negative, then don't limit.
   * @throws IOException
   */
private void startCluster(int blockSize, int numDatanodes, long perVolumeCapacity) throws IOException {
initConfig(blockSize);
cluster = new MiniDFSCluster
.Builder(conf)
.storagesPerDatanode(STORAGES_PER_DATANODE)
.numDataNodes(numDatanodes)
.build();
fs = cluster.getFileSystem();
client = fs.getClient();
cluster.waitActive();
if (perVolumeCapacity >= 0) {
try (FsDatasetSpi.FsVolumeReferences volumes =
cluster.getDataNodes().get(0).getFSDataset().getFsVolumeReferences()) {
singletonVolumeRef = volumes.get(0).obtainReference();
}
singletonVolume = ((FsVolumeImpl) singletonVolumeRef.getVolume());
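      // Cap the volume capacity so the tests can exhaust the available
      // space deterministically.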
singletonVolume.setCapacityForTesting(perVolumeCapacity);
}
}
@After
public void shutdownCluster() throws IOException {
if (singletonVolumeRef != null) {
singletonVolumeRef.close();
singletonVolumeRef = null;
}
if (client != null) {
client.close();
client = null;
}
if (fs != null) {
fs.close();
fs = null;
}
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
private void createFileAndTestSpaceReservation(
final String fileNamePrefix, final int fileBlockSize)
throws IOException, InterruptedException {
// Enough for 1 block + meta files + some delta.
final long configuredCapacity = fileBlockSize * 2 - 1;
startCluster(BLOCK_SIZE, 1, configuredCapacity);
FSDataOutputStream out = null;
Path path = new Path("/" + fileNamePrefix + ".dat");
try {
out = fs.create(path, false, 4096, (short) 1, fileBlockSize);
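      // Write a random partial block (less than a quarter of the block size)
      // so the remaining reservation can be verified.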
byte[] buffer = new byte[rand.nextInt(fileBlockSize / 4)];
out.write(buffer);
out.hsync();
int bytesWritten = buffer.length;
// Check that space was reserved for a full block minus the bytesWritten.
assertThat(singletonVolume.getReservedForRbw(),
is((long) fileBlockSize - bytesWritten));
out.close();
out = null;
// Check that the reserved space has been released since we closed the
// file.
assertThat(singletonVolume.getReservedForRbw(), is(0L));
// Reopen the file for appends and write 1 more byte.
out = fs.append(path);
out.write(buffer);
out.hsync();
bytesWritten += buffer.length;
// Check that space was again reserved for a full block minus the
// bytesWritten so far.
assertThat(singletonVolume.getReservedForRbw(),
is((long) fileBlockSize - bytesWritten));
// Write once again and again verify the available space. This ensures
// that the reserved space is progressively adjusted to account for bytes
// written to disk.
out.write(buffer);
out.hsync();
bytesWritten += buffer.length;
assertThat(singletonVolume.getReservedForRbw(),
is((long) fileBlockSize - bytesWritten));
} finally {
if (out != null) {
out.close();
}
}
}
@Test (timeout=300000)
public void testWithDefaultBlockSize()
throws IOException, InterruptedException {
createFileAndTestSpaceReservation(GenericTestUtils.getMethodName(), BLOCK_SIZE);
}
@Test (timeout=300000)
public void testWithNonDefaultBlockSize()
throws IOException, InterruptedException {
// Same test as previous one, but with a non-default block size.
createFileAndTestSpaceReservation(GenericTestUtils.getMethodName(), BLOCK_SIZE * 2);
}
@Rule
public ExpectedException thrown = ExpectedException.none();
@Test (timeout=300000)
public void testWithLimitedSpace() throws IOException {
// Cluster with just enough space for a full block + meta.
startCluster(BLOCK_SIZE, 1, 2 * BLOCK_SIZE - 1);
final String methodName = GenericTestUtils.getMethodName();
Path file1 = new Path("/" + methodName + ".01.dat");
Path file2 = new Path("/" + methodName + ".02.dat");
// Create two files.
FSDataOutputStream os1 = null, os2 = null;
try {
os1 = fs.create(file1);
os2 = fs.create(file2);
// Write one byte to the first file.
byte[] data = new byte[1];
os1.write(data);
os1.hsync();
// Try to write one byte to the second file.
// The block allocation must fail.
thrown.expect(RemoteException.class);
os2.write(data);
os2.hsync();
} finally {
if (os1 != null) {
os1.close();
}
// os2.close() will fail as no block was allocated.
}
}
/**
* Ensure that reserved space is released when the client goes away
* unexpectedly.
*
* The verification is done for each replica in the write pipeline.
*
* @throws IOException
*/
@Test(timeout=300000)
public void testSpaceReleasedOnUnexpectedEof()
throws IOException, InterruptedException, TimeoutException {
final short replication = 3;
startCluster(BLOCK_SIZE, replication, -1);
final String methodName = GenericTestUtils.getMethodName();
final Path file = new Path("/" + methodName + ".01.dat");
// Write 1 byte to the file and kill the writer.
FSDataOutputStream os = fs.create(file, replication);
os.write(new byte[1]);
os.hsync();
DFSTestUtil.abortStream((DFSOutputStream) os.getWrappedStream());
// Ensure all space reserved for the replica was released on each
// DataNode.
for (DataNode dn : cluster.getDataNodes()) {
try (FsDatasetSpi.FsVolumeReferences volumes =
dn.getFSDataset().getFsVolumeReferences()) {
final FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return (volume.getReservedForRbw() == 0);
}
}, 500, Integer.MAX_VALUE); // Wait until the test times out.
}
}
}
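  /**
   * Verify that reserved RBW space is released when creation of the RBW file
   * on the volume fails.
   */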
@SuppressWarnings("unchecked")
@Test(timeout = 30000)
public void testRBWFileCreationError() throws Exception {
final short replication = 1;
startCluster(BLOCK_SIZE, replication, -1);
final FsVolumeImpl fsVolumeImpl = (FsVolumeImpl) cluster.getDataNodes()
.get(0).getFSDataset().getFsVolumeReferences().get(0);
final String methodName = GenericTestUtils.getMethodName();
final Path file = new Path("/" + methodName + ".01.dat");
    // Mock BlockPoolSlice so that RBW file creation throws an IOException.
BlockPoolSlice blockPoolSlice = Mockito.mock(BlockPoolSlice.class);
Mockito.when(blockPoolSlice.createRbwFile((Block) Mockito.any()))
        .thenThrow(new IOException("Synthetic IO Exception through mock"));
Field field = FsVolumeImpl.class.getDeclaredField("bpSlices");
field.setAccessible(true);
Map<String, BlockPoolSlice> bpSlices = (Map<String, BlockPoolSlice>) field
.get(fsVolumeImpl);
bpSlices.put(fsVolumeImpl.getBlockPoolList()[0], blockPoolSlice);
try {
// Write 1 byte to the file
FSDataOutputStream os = fs.create(file, replication);
os.write(new byte[1]);
os.hsync();
os.close();
fail("Expecting IOException file creation failure");
} catch (IOException e) {
// Exception can be ignored (expected)
}
// Ensure RBW space reserved is released
assertTrue("Expected ZERO but got " + fsVolumeImpl.getReservedForRbw(),
fsVolumeImpl.getReservedForRbw() == 0);
}
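  /**
   * Verify that the reserved RBW space is exposed through the DataNodeInfo
   * JMX bean's VolumeInfo attribute.
   */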
@Test(timeout = 30000)
public void testRBWInJMXBean() throws Exception {
final short replication = 1;
startCluster(BLOCK_SIZE, replication, -1);
final String methodName = GenericTestUtils.getMethodName();
final Path file = new Path("/" + methodName + ".01.dat");
try (FSDataOutputStream os = fs.create(file, replication)) {
// Write 1 byte to the file
os.write(new byte[1]);
os.hsync();
final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
final ObjectName mxbeanName = new ObjectName(
"Hadoop:service=DataNode,name=DataNodeInfo");
final String volumeInfo = (String) mbs.getAttribute(mxbeanName,
"VolumeInfo");
assertTrue(volumeInfo.contains("reservedSpaceForRBW"));
}
}
/**
* Stress test to ensure we are not leaking reserved space.
* @throws IOException
* @throws InterruptedException
*/
@Test (timeout=600000)
public void stressTest() throws IOException, InterruptedException {
final int numWriters = 5;
startCluster(SMALL_BLOCK_SIZE, 1, SMALL_BLOCK_SIZE * numWriters * 10);
Writer[] writers = new Writer[numWriters];
// Start a few writers and let them run for a while.
for (int i = 0; i < numWriters; ++i) {
writers[i] = new Writer(client, SMALL_BLOCK_SIZE);
writers[i].start();
}
Thread.sleep(60000);
// Stop the writers.
for (Writer w : writers) {
w.stopWriter();
}
int filesCreated = 0;
int numFailures = 0;
for (Writer w : writers) {
w.join();
filesCreated += w.getFilesCreated();
numFailures += w.getNumFailures();
}
LOG.info("Stress test created " + filesCreated +
" files and hit " + numFailures + " failures");
// Check no space was leaked.
assertThat(singletonVolume.getReservedForRbw(), is(0L));
}
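  /**
   * Background writer used by the stress test: repeatedly creates a file,
   * writes a random amount of data, deletes the file, and tracks the number
   * of files created and failures seen until told to stop.
   */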
private static class Writer extends Daemon {
private volatile boolean keepRunning;
private final DFSClient localClient;
private int filesCreated = 0;
private int numFailures = 0;
byte[] data;
Writer(DFSClient client, int blockSize) throws IOException {
localClient = client;
keepRunning = true;
filesCreated = 0;
numFailures = 0;
// At least some of the files should span a block boundary.
data = new byte[blockSize * 2];
}
@Override
public void run() {
      /*
       * Create a file, write up to two blocks of data and close the file.
       * Do this in a loop until we are told to stop.
       */
while (keepRunning) {
OutputStream os = null;
try {
String filename = "/file-" + rand.nextLong();
os = localClient.create(filename, false);
os.write(data, 0, rand.nextInt(data.length));
IOUtils.closeQuietly(os);
os = null;
localClient.delete(filename, false);
Thread.sleep(50); // Sleep for a bit to avoid killing the system.
++filesCreated;
} catch (IOException ioe) {
// Just ignore the exception and keep going.
++numFailures;
} catch (InterruptedException ie) {
return;
} finally {
if (os != null) {
IOUtils.closeQuietly(os);
}
}
}
}
public void stopWriter() {
keepRunning = false;
}
public int getFilesCreated() {
return filesCreated;
}
public int getNumFailures() {
return numFailures;
}
}
}
| 14,680 | 31.408389 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
import java.io.IOException;
import static org.apache.hadoop.fs.StorageType.DEFAULT;
import static org.apache.hadoop.fs.StorageType.RAM_DISK;
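/**
 * Verify DataNode restart behavior for lazy-persist replicas: replicas
 * already saved by the lazy writer are reported on persistent storage after
 * the restart, while replicas written with the lazy writer stopped remain on
 * transient storage.
 */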
public class TestLazyPersistReplicaRecovery extends LazyPersistTestCase {
@Test
public void testDnRestartWithSavedReplicas()
throws IOException, InterruptedException {
getClusterBuilder().build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
makeTestFile(path1, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
// Sleep for a short time to allow the lazy writer thread to do its job.
// However the block replica should not be evicted from RAM_DISK yet.
Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
LOG.info("Restarting the DataNode");
cluster.restartDataNode(0, true);
cluster.waitActive();
triggerBlockReport();
// Ensure that the replica is now on persistent storage.
ensureFileReplicasOnStorageType(path1, DEFAULT);
}
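  /**
   * Restart the DataNode with the lazy writer stopped so the replica is never
   * saved, then verify it is still reported on RAM_DISK.
   */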
@Test
public void testDnRestartWithUnsavedReplicas()
throws IOException, InterruptedException {
getClusterBuilder().build();
FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
makeTestFile(path1, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
LOG.info("Restarting the DataNode");
cluster.restartDataNode(0, true);
cluster.waitActive();
// Ensure that the replica is still on transient storage.
ensureFileReplicasOnStorageType(path1, RAM_DISK);
}
}
| 2,726 | 34.881579 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.fs.StorageType.DEFAULT;
import static org.apache.hadoop.fs.StorageType.RAM_DISK;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
public class TestLazyWriter extends LazyPersistTestCase {
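  /**
   * Verify that all blocks of a lazy-persist file are saved to persistent
   * storage by the lazy writer.
   */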
@Test
public void testLazyPersistBlocksAreSaved()
throws IOException, InterruptedException, TimeoutException {
getClusterBuilder().build();
final int NUM_BLOCKS = 10;
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
// Create a test file
makeTestFile(path, BLOCK_SIZE * NUM_BLOCKS, true);
LocatedBlocks locatedBlocks = ensureFileReplicasOnStorageType(path, RAM_DISK);
waitForMetric("RamDiskBlocksLazyPersisted", NUM_BLOCKS);
LOG.info("Verifying copy was saved to lazyPersist/");
// Make sure that there is a saved copy of the replica on persistent
// storage.
ensureLazyPersistBlocksAreSaved(locatedBlocks);
}
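  /**
   * With locked memory limited to a single block, writing a second
   * lazy-persist file should evict the previously persisted block from
   * RAM_DISK.
   */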
@Test
public void testSynchronousEviction() throws Exception {
getClusterBuilder().setMaxLockedMemory(BLOCK_SIZE).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
final Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
makeTestFile(path1, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
// Wait until the replica is written to persistent storage.
waitForMetric("RamDiskBlocksLazyPersisted", 1);
// Ensure that writing a new file to RAM DISK evicts the block
// for the previous one.
Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
makeTestFile(path2, BLOCK_SIZE, true);
verifyRamDiskJMXMetric("RamDiskBlocksEvicted", 1);
verifyRamDiskJMXMetric("RamDiskBlocksEvictedWithoutRead", 1);
}
/**
* RamDisk eviction should not happen on blocks that are not yet
* persisted on disk.
* @throws IOException
* @throws InterruptedException
*/
@Test
public void testRamDiskEvictionBeforePersist()
throws Exception {
getClusterBuilder().setMaxLockedMemory(BLOCK_SIZE).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
final int SEED = 0XFADED;
// Stop lazy writer to ensure block for path1 is not persisted to disk.
FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));
makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
// Create second file with a replica on RAM_DISK.
makeTestFile(path2, BLOCK_SIZE, true);
// Eviction should not happen for block of the first file that is not
// persisted yet.
verifyRamDiskJMXMetric("RamDiskBlocksEvicted", 0);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
ensureFileReplicasOnStorageType(path2, DEFAULT);
    assertTrue(fs.exists(path1));
    assertTrue(fs.exists(path2));
assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));
}
/**
* Validates lazy persisted blocks are evicted from RAM_DISK based on LRU.
* @throws IOException
* @throws InterruptedException
*/
@Test
public void testRamDiskEvictionIsLru()
throws Exception {
final int NUM_PATHS = 5;
getClusterBuilder().setMaxLockedMemory(NUM_PATHS * BLOCK_SIZE).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path paths[] = new Path[NUM_PATHS * 2];
for (int i = 0; i < paths.length; i++) {
paths[i] = new Path("/" + METHOD_NAME + "." + i +".dat");
}
for (int i = 0; i < NUM_PATHS; i++) {
makeTestFile(paths[i], BLOCK_SIZE, true);
}
waitForMetric("RamDiskBlocksLazyPersisted", NUM_PATHS);
for (int i = 0; i < NUM_PATHS; ++i) {
ensureFileReplicasOnStorageType(paths[i], RAM_DISK);
}
// Open the files for read in a random order.
ArrayList<Integer> indexes = new ArrayList<Integer>(NUM_PATHS);
for (int i = 0; i < NUM_PATHS; ++i) {
indexes.add(i);
}
Collections.shuffle(indexes);
for (int i = 0; i < NUM_PATHS; ++i) {
LOG.info("Touching file " + paths[indexes.get(i)]);
DFSTestUtil.readFile(fs, paths[indexes.get(i)]);
}
// Create an equal number of new files ensuring that the previous
// files are evicted in the same order they were read.
for (int i = 0; i < NUM_PATHS; ++i) {
makeTestFile(paths[i + NUM_PATHS], BLOCK_SIZE, true);
triggerBlockReport();
Thread.sleep(3000);
ensureFileReplicasOnStorageType(paths[i + NUM_PATHS], RAM_DISK);
ensureFileReplicasOnStorageType(paths[indexes.get(i)], DEFAULT);
for (int j = i + 1; j < NUM_PATHS; ++j) {
ensureFileReplicasOnStorageType(paths[indexes.get(j)], RAM_DISK);
}
}
verifyRamDiskJMXMetric("RamDiskBlocksWrite", NUM_PATHS * 2);
verifyRamDiskJMXMetric("RamDiskBlocksWriteFallback", 0);
verifyRamDiskJMXMetric("RamDiskBytesWrite", BLOCK_SIZE * NUM_PATHS * 2);
verifyRamDiskJMXMetric("RamDiskBlocksReadHits", NUM_PATHS);
verifyRamDiskJMXMetric("RamDiskBlocksEvicted", NUM_PATHS);
verifyRamDiskJMXMetric("RamDiskBlocksEvictedWithoutRead", 0);
verifyRamDiskJMXMetric("RamDiskBlocksDeletedBeforeLazyPersisted", 0);
}
/**
* Delete lazy-persist file that has not been persisted to disk.
* Memory is freed up and file is gone.
* @throws IOException
*/
@Test
public void testDeleteBeforePersist()
throws Exception {
getClusterBuilder().build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, BLOCK_SIZE, true);
LocatedBlocks locatedBlocks =
ensureFileReplicasOnStorageType(path, RAM_DISK);
// Delete before persist
client.delete(path.toString(), false);
Assert.assertFalse(fs.exists(path));
assertThat(verifyDeletedBlocks(locatedBlocks), is(true));
verifyRamDiskJMXMetric("RamDiskBlocksDeletedBeforeLazyPersisted", 1);
}
/**
* Delete lazy-persist file that has been persisted to disk
* Both memory blocks and disk blocks are deleted.
* @throws IOException
* @throws InterruptedException
*/
@Test
public void testDeleteAfterPersist()
throws Exception {
getClusterBuilder().build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, BLOCK_SIZE, true);
LocatedBlocks locatedBlocks = ensureFileReplicasOnStorageType(path, RAM_DISK);
waitForMetric("RamDiskBlocksLazyPersisted", 1);
// Delete after persist
client.delete(path.toString(), false);
Assert.assertFalse(fs.exists(path));
assertThat(verifyDeletedBlocks(locatedBlocks), is(true));
verifyRamDiskJMXMetric("RamDiskBlocksLazyPersisted", 1);
verifyRamDiskJMXMetric("RamDiskBytesLazyPersisted", BLOCK_SIZE);
}
/**
* RAM_DISK used/free space
* @throws IOException
* @throws InterruptedException
*/
@Test
public void testDfsUsageCreateDelete()
throws IOException, InterruptedException, TimeoutException {
getClusterBuilder().setRamDiskReplicaCapacity(4).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
// Get the usage before write BLOCK_SIZE
long usedBeforeCreate = fs.getUsed();
makeTestFile(path, BLOCK_SIZE, true);
long usedAfterCreate = fs.getUsed();
assertThat(usedAfterCreate, is((long) BLOCK_SIZE));
waitForMetric("RamDiskBlocksLazyPersisted", 1);
long usedAfterPersist = fs.getUsed();
assertThat(usedAfterPersist, is((long) BLOCK_SIZE));
// Delete after persist
client.delete(path.toString(), false);
long usedAfterDelete = fs.getUsed();
assertThat(usedBeforeCreate, is(usedAfterDelete));
}
}
| 9,253 | 34.72973 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;
import java.util.Collection;
import java.util.Random;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.io.IOUtils;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;
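/**
 * Test helpers that expose FsDatasetImpl internals such as block and meta
 * files, the replica map, pending async deletions and the lazy writer daemon.
 */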
public class FsDatasetTestUtil {
public static File getFile(FsDatasetSpi<?> fsd, String bpid, long bid) {
return ((FsDatasetImpl)fsd).getFile(bpid, bid, false);
}
public static File getBlockFile(FsDatasetSpi<?> fsd, String bpid, Block b
) throws IOException {
return ((FsDatasetImpl)fsd).getBlockFile(bpid, b.getBlockId());
}
public static File getMetaFile(FsDatasetSpi<?> fsd, String bpid, Block b)
throws IOException {
return FsDatasetUtil.getMetaFile(getBlockFile(fsd, bpid, b), b
.getGenerationStamp());
}
public static boolean unlinkBlock(FsDatasetSpi<?> fsd,
ExtendedBlock block, int numLinks) throws IOException {
final ReplicaInfo info = ((FsDatasetImpl)fsd).getReplicaInfo(block);
return info.unlinkBlock(numLinks);
}
public static ReplicaInfo fetchReplicaInfo (final FsDatasetSpi<?> fsd,
final String bpid, final long blockId) {
return ((FsDatasetImpl)fsd).fetchReplicaInfo(bpid, blockId);
}
public static long getPendingAsyncDeletions(FsDatasetSpi<?> fsd) {
return ((FsDatasetImpl)fsd).asyncDiskService.countPendingDeletions();
}
public static Collection<ReplicaInfo> getReplicas(FsDatasetSpi<?> fsd,
String bpid) {
return ((FsDatasetImpl)fsd).volumeMap.replicas(bpid);
}
/**
* Stop the lazy writer daemon that saves RAM disk files to persistent storage.
   * @param dn the DataNode whose lazy writer should be stopped
*/
public static void stopLazyWriter(DataNode dn) {
FsDatasetImpl fsDataset = ((FsDatasetImpl) dn.getFSDataset());
((FsDatasetImpl.LazyWriter) fsDataset.lazyWriter.getRunnable()).stop();
}
/**
* Asserts that the storage lock file in the given directory has been
* released. This method works by trying to acquire the lock file itself. If
* locking fails here, then the main code must have failed to release it.
*
* @param dir the storage directory to check
* @throws IOException if there is an unexpected I/O error
*/
public static void assertFileLockReleased(String dir) throws IOException {
StorageLocation sl = StorageLocation.parse(dir);
File lockFile = new File(sl.getFile(), Storage.STORAGE_FILE_LOCK);
try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
FileChannel channel = raf.getChannel()) {
FileLock lock = channel.tryLock();
assertNotNull(String.format(
"Lock file at %s appears to be held by a different process.",
lockFile.getAbsolutePath()), lock);
if (lock != null) {
try {
lock.release();
} catch (IOException e) {
FsDatasetImpl.LOG.warn(String.format("I/O error releasing file lock %s.",
lockFile.getAbsolutePath()), e);
throw e;
}
}
} catch (OverlappingFileLockException e) {
fail(String.format("Must release lock file at %s.",
lockFile.getAbsolutePath()));
}
}
}
| 4,555 | 37.285714 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import com.google.common.base.Preconditions;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.ClientContext;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.NativeCodeLoader;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.fs.StorageType.DEFAULT;
import static org.apache.hadoop.fs.StorageType.RAM_DISK;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
/**
* Test Lazy persist behavior with short-circuit reads. These tests
* will be run on Linux only with Native IO enabled. The tests fake
* RAM_DISK storage using local disk.
*/
public class TestScrLazyPersistFiles extends LazyPersistTestCase {
@BeforeClass
public static void init() {
DomainSocket.disableBindPathValidation();
}
@Before
public void before() {
Assume.assumeThat(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS,
equalTo(true));
Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
final long osPageSize = NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();
Preconditions.checkState(BLOCK_SIZE >= osPageSize);
Preconditions.checkState(BLOCK_SIZE % osPageSize == 0);
}
@Rule
public ExpectedException exception = ExpectedException.none();
/**
* Read in-memory block with Short Circuit Read
* Note: the test uses faked RAM_DISK from physical disk.
*/
@Test
public void testRamDiskShortCircuitRead()
throws IOException, InterruptedException, TimeoutException {
getClusterBuilder().setUseScr(true).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
final int SEED = 0xFADED;
Path path = new Path("/" + METHOD_NAME + ".dat");
// Create a file and wait till it is persisted.
makeRandomTestFile(path, BLOCK_SIZE, true, SEED);
ensureFileReplicasOnStorageType(path, RAM_DISK);
waitForMetric("RamDiskBlocksLazyPersisted", 1);
HdfsDataInputStream fis = (HdfsDataInputStream) fs.open(path);
// Verify SCR read counters
try {
byte[] buf = new byte[BUFFER_LENGTH];
fis.read(0, buf, 0, BUFFER_LENGTH);
Assert.assertEquals(BUFFER_LENGTH,
fis.getReadStatistics().getTotalBytesRead());
Assert.assertEquals(BUFFER_LENGTH,
fis.getReadStatistics().getTotalShortCircuitBytesRead());
} finally {
fis.close();
fis = null;
}
}
/**
* Eviction of lazy persisted blocks with Short Circuit Read handle open
* Note: the test uses faked RAM_DISK from physical disk.
* @throws IOException
* @throws InterruptedException
*/
@Test
  public void testScrDuringEviction()
throws Exception {
getClusterBuilder().setUseScr(true).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
// Create a file and wait till it is persisted.
makeTestFile(path1, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
waitForMetric("RamDiskBlocksLazyPersisted", 1);
HdfsDataInputStream fis = (HdfsDataInputStream) fs.open(path1);
try {
      // Keep an open read handle to path1 while creating path2
byte[] buf = new byte[BUFFER_LENGTH];
fis.read(0, buf, 0, BUFFER_LENGTH);
triggerEviction(cluster.getDataNodes().get(0));
// Ensure path1 is still readable from the open SCR handle.
fis.read(0, buf, 0, BUFFER_LENGTH);
assertThat(fis.getReadStatistics().getTotalBytesRead(),
is((long) 2 * BUFFER_LENGTH));
assertThat(fis.getReadStatistics().getTotalShortCircuitBytesRead(),
is((long) 2 * BUFFER_LENGTH));
} finally {
IOUtils.closeQuietly(fis);
}
}
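  /**
   * Verify that short-circuit reads continue to work after the replica has
   * been evicted from RAM_DISK to DEFAULT storage.
   */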
@Test
public void testScrAfterEviction()
throws IOException, InterruptedException, TimeoutException {
getClusterBuilder().setUseScr(true)
.setUseLegacyBlockReaderLocal(false)
.build();
doShortCircuitReadAfterEvictionTest();
}
@Test
public void testLegacyScrAfterEviction()
throws IOException, InterruptedException, TimeoutException {
getClusterBuilder().setUseScr(true)
.setUseLegacyBlockReaderLocal(true)
.build();
doShortCircuitReadAfterEvictionTest();
// In the implementation of legacy short-circuit reads, any failure is
// trapped silently, reverts back to a remote read, and also disables all
// subsequent legacy short-circuit reads in the ClientContext.
// Assert that it didn't get disabled.
ClientContext clientContext = client.getClientContext();
Assert.assertFalse(clientContext.getDisableLegacyBlockReaderLocal());
}
private void doShortCircuitReadAfterEvictionTest() throws IOException,
InterruptedException, TimeoutException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
final int SEED = 0xFADED;
makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
waitForMetric("RamDiskBlocksLazyPersisted", 1);
// Verify short-circuit read from RAM_DISK.
File metaFile = cluster.getBlockMetadataFile(0,
DFSTestUtil.getFirstBlock(fs, path1));
assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize());
assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));
triggerEviction(cluster.getDataNodes().get(0));
// Verify short-circuit read still works from DEFAULT storage. This time,
// we'll have a checksum written during lazy persistence.
ensureFileReplicasOnStorageType(path1, DEFAULT);
metaFile = cluster.getBlockMetadataFile(0,
DFSTestUtil.getFirstBlock(fs, path1));
assertTrue(metaFile.length() > BlockMetadataHeader.getHeaderSize());
assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));
}
@Test
public void testScrBlockFileCorruption() throws IOException,
InterruptedException, TimeoutException {
getClusterBuilder().setUseScr(true)
.setUseLegacyBlockReaderLocal(false)
.build();
doShortCircuitReadBlockFileCorruptionTest();
}
@Test
public void testLegacyScrBlockFileCorruption() throws IOException,
InterruptedException, TimeoutException {
getClusterBuilder().setUseScr(true)
.setUseLegacyBlockReaderLocal(true)
.build();
doShortCircuitReadBlockFileCorruptionTest();
}
public void doShortCircuitReadBlockFileCorruptionTest() throws IOException,
InterruptedException, TimeoutException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
makeTestFile(path1, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
waitForMetric("RamDiskBlocksLazyPersisted", 1);
triggerEviction(cluster.getDataNodes().get(0));
// Corrupt the lazy-persisted block file, and verify that checksum
// verification catches it.
ensureFileReplicasOnStorageType(path1, DEFAULT);
cluster.corruptReplica(0, DFSTestUtil.getFirstBlock(fs, path1));
exception.expect(ChecksumException.class);
DFSTestUtil.readFileBuffer(fs, path1);
}
@Test
public void testScrMetaFileCorruption() throws IOException,
InterruptedException, TimeoutException {
getClusterBuilder().setUseScr(true)
.setUseLegacyBlockReaderLocal(false)
.build();
doShortCircuitReadMetaFileCorruptionTest();
}
@Test
public void testLegacyScrMetaFileCorruption() throws IOException,
InterruptedException, TimeoutException {
getClusterBuilder().setUseScr(true)
.setUseLegacyBlockReaderLocal(true)
.build();
doShortCircuitReadMetaFileCorruptionTest();
}
public void doShortCircuitReadMetaFileCorruptionTest() throws IOException,
InterruptedException, TimeoutException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
makeTestFile(path1, BLOCK_SIZE, true);
ensureFileReplicasOnStorageType(path1, RAM_DISK);
waitForMetric("RamDiskBlocksLazyPersisted", 1);
triggerEviction(cluster.getDataNodes().get(0));
// Corrupt the lazy-persisted checksum file, and verify that checksum
// verification catches it.
ensureFileReplicasOnStorageType(path1, DEFAULT);
File metaFile = cluster.getBlockMetadataFile(0,
DFSTestUtil.getFirstBlock(fs, path1));
MiniDFSCluster.corruptBlock(metaFile);
exception.expect(ChecksumException.class);
DFSTestUtil.readFileBuffer(fs, path1);
}
}
| 10,474 | 37.229927 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
import java.io.IOException;
import static org.hamcrest.core.Is.is;
import static org.hamcrest.core.IsNot.not;
import static org.junit.Assert.assertThat;
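/**
 * Verify that the LAZY_PERSIST storage policy is applied only when requested
 * and that it is preserved across edit log replay and fsimage checkpoints.
 */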
public class TestLazyPersistPolicy extends LazyPersistTestCase {
@Test
public void testPolicyNotSetByDefault() throws IOException {
getClusterBuilder().build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, 0, false);
// Stat the file and check that the LAZY_PERSIST policy is not
// returned back.
HdfsFileStatus status = client.getFileInfo(path.toString());
assertThat(status.getStoragePolicy(), not(LAZY_PERSIST_POLICY_ID));
}
@Test
public void testPolicyPropagation() throws IOException {
getClusterBuilder().build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, 0, true);
// Stat the file and check that the lazyPersist flag is returned back.
HdfsFileStatus status = client.getFileInfo(path.toString());
assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
}
@Test
public void testPolicyPersistenceInEditLog() throws IOException {
getClusterBuilder().build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, 0, true);
cluster.restartNameNode(true);
// Stat the file and check that the lazyPersist flag is returned back.
HdfsFileStatus status = client.getFileInfo(path.toString());
assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
}
@Test
public void testPolicyPersistenceInFsImage() throws IOException {
getClusterBuilder().build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, 0, true);
// checkpoint
fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
cluster.restartNameNode(true);
// Stat the file and check that the lazyPersist flag is returned back.
HdfsFileStatus status = client.getFileInfo(path.toString());
assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
}
}
| 3,467 | 36.695652 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery;
import org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.junit.Assert;
import org.junit.Test;
/** Test FSDataset#append, writeToRbw, and writeToTemporary. */
public class TestWriteToReplica {
final private static int FINALIZED = 0;
final private static int TEMPORARY = 1;
final private static int RBW = 2;
final private static int RWR = 3;
final private static int RUR = 4;
final private static int NON_EXISTENT = 5;
// test close
@Test
public void testClose() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
try {
cluster.waitActive();
DataNode dn = cluster.getDataNodes().get(0);
FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);
// set up replicasMap
String bpid = cluster.getNamesystem().getBlockPoolId();
ExtendedBlock[] blocks = setup(bpid, dataSet);
// test close
testClose(dataSet, blocks);
} finally {
cluster.shutdown();
}
}
// test append
@Test
public void testAppend() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
try {
cluster.waitActive();
DataNode dn = cluster.getDataNodes().get(0);
FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);
// set up replicasMap
String bpid = cluster.getNamesystem().getBlockPoolId();
ExtendedBlock[] blocks = setup(bpid, dataSet);
// test append
testAppend(bpid, dataSet, blocks);
} finally {
cluster.shutdown();
}
}
// test writeToRbw
@Test
public void testWriteToRbw() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
try {
cluster.waitActive();
DataNode dn = cluster.getDataNodes().get(0);
FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);
// set up replicasMap
String bpid = cluster.getNamesystem().getBlockPoolId();
ExtendedBlock[] blocks = setup(bpid, dataSet);
// test writeToRbw
testWriteToRbw(dataSet, blocks);
} finally {
cluster.shutdown();
}
}
// test writeToTemporary
@Test
public void testWriteToTemporary() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
try {
cluster.waitActive();
DataNode dn = cluster.getDataNodes().get(0);
FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);
// set up replicasMap
String bpid = cluster.getNamesystem().getBlockPoolId();
ExtendedBlock[] blocks = setup(bpid, dataSet);
// test writeToTemporary
testWriteToTemporary(dataSet, blocks);
} finally {
cluster.shutdown();
}
}
/**
* Generate testing environment and return a collection of blocks
* on which to run the tests.
*
* @param bpid Block pool ID to generate blocks for
* @param dataSet Namespace in which to insert blocks
* @return Contrived blocks for further testing.
* @throws IOException
*/
private ExtendedBlock[] setup(String bpid, FsDatasetImpl dataSet) throws IOException {
// setup replicas map
ExtendedBlock[] blocks = new ExtendedBlock[] {
new ExtendedBlock(bpid, 1, 1, 2001), new ExtendedBlock(bpid, 2, 1, 2002),
new ExtendedBlock(bpid, 3, 1, 2003), new ExtendedBlock(bpid, 4, 1, 2004),
new ExtendedBlock(bpid, 5, 1, 2005), new ExtendedBlock(bpid, 6, 1, 2006)
};
ReplicaMap replicasMap = dataSet.volumeMap;
try (FsDatasetSpi.FsVolumeReferences references =
dataSet.getFsVolumeReferences()) {
FsVolumeImpl vol = (FsVolumeImpl) references.get(0);
ReplicaInfo replicaInfo = new FinalizedReplica(
blocks[FINALIZED].getLocalBlock(), vol,
vol.getCurrentDir().getParentFile());
replicasMap.add(bpid, replicaInfo);
replicaInfo.getBlockFile().createNewFile();
replicaInfo.getMetaFile().createNewFile();
replicasMap.add(bpid, new ReplicaInPipeline(
blocks[TEMPORARY].getBlockId(),
blocks[TEMPORARY].getGenerationStamp(), vol,
vol.createTmpFile(bpid, blocks[TEMPORARY].getLocalBlock())
.getParentFile(), 0));
replicaInfo = new ReplicaBeingWritten(blocks[RBW].getLocalBlock(), vol,
vol.createRbwFile(bpid, blocks[RBW].getLocalBlock()).getParentFile(),
null);
replicasMap.add(bpid, replicaInfo);
replicaInfo.getBlockFile().createNewFile();
replicaInfo.getMetaFile().createNewFile();
replicasMap.add(bpid, new ReplicaWaitingToBeRecovered(
blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(bpid,
blocks[RWR].getLocalBlock()).getParentFile()));
replicasMap
.add(bpid, new ReplicaUnderRecovery(new FinalizedReplica(blocks[RUR]
.getLocalBlock(), vol, vol.getCurrentDir().getParentFile()),
2007));
}
return blocks;
}
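  /**
   * Exercise append() and recoverAppend() against replicas in each state and
   * verify that only the permitted states succeed.
   */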
private void testAppend(String bpid, FsDatasetImpl dataSet, ExtendedBlock[] blocks) throws IOException {
long newGS = blocks[FINALIZED].getGenerationStamp()+1;
final FsVolumeImpl v = (FsVolumeImpl)dataSet.volumeMap.get(
bpid, blocks[FINALIZED].getLocalBlock()).getVolume();
long available = v.getCapacity()-v.getDfsUsed();
long expectedLen = blocks[FINALIZED].getNumBytes();
try {
v.onBlockFileDeletion(bpid, -available);
blocks[FINALIZED].setNumBytes(expectedLen+100);
dataSet.append(blocks[FINALIZED], newGS, expectedLen);
Assert.fail("Should not have space to append to an RWR replica" + blocks[RWR]);
} catch (DiskOutOfSpaceException e) {
Assert.assertTrue(e.getMessage().startsWith(
"Insufficient space for appending to "));
}
v.onBlockFileDeletion(bpid, available);
blocks[FINALIZED].setNumBytes(expectedLen);
newGS = blocks[RBW].getGenerationStamp()+1;
dataSet.append(blocks[FINALIZED], newGS,
blocks[FINALIZED].getNumBytes()); // successful
blocks[FINALIZED].setGenerationStamp(newGS);
try {
dataSet.append(blocks[TEMPORARY], blocks[TEMPORARY].getGenerationStamp()+1,
blocks[TEMPORARY].getNumBytes());
Assert.fail("Should not have appended to a temporary replica "
+ blocks[TEMPORARY]);
} catch (ReplicaNotFoundException e) {
Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA +
blocks[TEMPORARY], e.getMessage());
}
try {
dataSet.append(blocks[RBW], blocks[RBW].getGenerationStamp()+1,
blocks[RBW].getNumBytes());
Assert.fail("Should not have appended to an RBW replica" + blocks[RBW]);
} catch (ReplicaNotFoundException e) {
Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA +
blocks[RBW], e.getMessage());
}
try {
dataSet.append(blocks[RWR], blocks[RWR].getGenerationStamp()+1,
          blocks[RWR].getNumBytes());
Assert.fail("Should not have appended to an RWR replica" + blocks[RWR]);
} catch (ReplicaNotFoundException e) {
Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA +
blocks[RWR], e.getMessage());
}
try {
dataSet.append(blocks[RUR], blocks[RUR].getGenerationStamp()+1,
blocks[RUR].getNumBytes());
Assert.fail("Should not have appended to an RUR replica" + blocks[RUR]);
} catch (ReplicaNotFoundException e) {
Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA +
blocks[RUR], e.getMessage());
}
try {
dataSet.append(blocks[NON_EXISTENT],
blocks[NON_EXISTENT].getGenerationStamp(),
blocks[NON_EXISTENT].getNumBytes());
Assert.fail("Should not have appended to a non-existent replica " +
blocks[NON_EXISTENT]);
} catch (ReplicaNotFoundException e) {
Assert.assertEquals(ReplicaNotFoundException.NON_EXISTENT_REPLICA +
blocks[NON_EXISTENT], e.getMessage());
}
newGS = blocks[FINALIZED].getGenerationStamp()+1;
dataSet.recoverAppend(blocks[FINALIZED], newGS,
blocks[FINALIZED].getNumBytes()); // successful
blocks[FINALIZED].setGenerationStamp(newGS);
try {
dataSet.recoverAppend(blocks[TEMPORARY], blocks[TEMPORARY].getGenerationStamp()+1,
blocks[TEMPORARY].getNumBytes());
Assert.fail("Should not have appended to a temporary replica "
+ blocks[TEMPORARY]);
} catch (ReplicaNotFoundException e) {
Assert.assertTrue(e.getMessage().startsWith(
ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
}
newGS = blocks[RBW].getGenerationStamp()+1;
dataSet.recoverAppend(blocks[RBW], newGS, blocks[RBW].getNumBytes());
blocks[RBW].setGenerationStamp(newGS);
try {
dataSet.recoverAppend(blocks[RWR], blocks[RWR].getGenerationStamp()+1,
          blocks[RWR].getNumBytes());
Assert.fail("Should not have appended to an RWR replica" + blocks[RWR]);
} catch (ReplicaNotFoundException e) {
Assert.assertTrue(e.getMessage().startsWith(
ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
}
try {
dataSet.recoverAppend(blocks[RUR], blocks[RUR].getGenerationStamp()+1,
blocks[RUR].getNumBytes());
Assert.fail("Should not have appended to an RUR replica" + blocks[RUR]);
} catch (ReplicaNotFoundException e) {
Assert.assertTrue(e.getMessage().startsWith(
ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
}
try {
dataSet.recoverAppend(blocks[NON_EXISTENT],
blocks[NON_EXISTENT].getGenerationStamp(),
blocks[NON_EXISTENT].getNumBytes());
Assert.fail("Should not have appended to a non-existent replica " +
blocks[NON_EXISTENT]);
} catch (ReplicaNotFoundException e) {
Assert.assertTrue(e.getMessage().startsWith(
ReplicaNotFoundException.NON_EXISTENT_REPLICA));
}
}
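  /**
   * Exercise recoverClose() against replicas in each state and verify that
   * only FINALIZED and RBW replicas can be recover-closed.
   */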
private void testClose(FsDatasetImpl dataSet, ExtendedBlock [] blocks) throws IOException {
long newGS = blocks[FINALIZED].getGenerationStamp()+1;
dataSet.recoverClose(blocks[FINALIZED], newGS,
blocks[FINALIZED].getNumBytes()); // successful
blocks[FINALIZED].setGenerationStamp(newGS);
try {
dataSet.recoverClose(blocks[TEMPORARY], blocks[TEMPORARY].getGenerationStamp()+1,
blocks[TEMPORARY].getNumBytes());
Assert.fail("Should not have recovered close a temporary replica "
+ blocks[TEMPORARY]);
} catch (ReplicaNotFoundException e) {
Assert.assertTrue(e.getMessage().startsWith(
ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
}
newGS = blocks[RBW].getGenerationStamp()+1;
dataSet.recoverClose(blocks[RBW], newGS, blocks[RBW].getNumBytes());
blocks[RBW].setGenerationStamp(newGS);
try {
dataSet.recoverClose(blocks[RWR], blocks[RWR].getGenerationStamp()+1,
          blocks[RWR].getNumBytes());
Assert.fail("Should not have recovered close an RWR replica" + blocks[RWR]);
} catch (ReplicaNotFoundException e) {
Assert.assertTrue(e.getMessage().startsWith(
ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
}
try {
dataSet.recoverClose(blocks[RUR], blocks[RUR].getGenerationStamp()+1,
blocks[RUR].getNumBytes());
Assert.fail("Should not have recovered close an RUR replica" + blocks[RUR]);
} catch (ReplicaNotFoundException e) {
Assert.assertTrue(e.getMessage().startsWith(
ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
}
try {
dataSet.recoverClose(blocks[NON_EXISTENT],
blocks[NON_EXISTENT].getGenerationStamp(),
blocks[NON_EXISTENT].getNumBytes());
Assert.fail("Should not have recovered close a non-existent replica " +
blocks[NON_EXISTENT]);
} catch (ReplicaNotFoundException e) {
Assert.assertTrue(e.getMessage().startsWith(
ReplicaNotFoundException.NON_EXISTENT_REPLICA));
}
}
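  /**
   * Exercise recoverRbw() and createRbw() against replicas in each state and
   * verify the expected failures.
   */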
private void testWriteToRbw(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throws IOException {
try {
dataSet.recoverRbw(blocks[FINALIZED],
blocks[FINALIZED].getGenerationStamp()+1,
0L, blocks[FINALIZED].getNumBytes());
Assert.fail("Should not have recovered a finalized replica " +
blocks[FINALIZED]);
} catch (ReplicaNotFoundException e) {
Assert.assertTrue(e.getMessage().startsWith(
ReplicaNotFoundException.NON_RBW_REPLICA));
}
try {
dataSet.createRbw(StorageType.DEFAULT, blocks[FINALIZED], false);
Assert.fail("Should not have created a replica that's already " +
"finalized " + blocks[FINALIZED]);
} catch (ReplicaAlreadyExistsException e) {
}
try {
dataSet.recoverRbw(blocks[TEMPORARY],
blocks[TEMPORARY].getGenerationStamp()+1,
0L, blocks[TEMPORARY].getNumBytes());
Assert.fail("Should not have recovered a temporary replica " +
blocks[TEMPORARY]);
} catch (ReplicaNotFoundException e) {
Assert.assertTrue(e.getMessage().startsWith(
ReplicaNotFoundException.NON_RBW_REPLICA));
}
try {
dataSet.createRbw(StorageType.DEFAULT, blocks[TEMPORARY], false);
Assert.fail("Should not have created a replica that had created as " +
"temporary " + blocks[TEMPORARY]);
} catch (ReplicaAlreadyExistsException e) {
}
dataSet.recoverRbw(blocks[RBW], blocks[RBW].getGenerationStamp()+1,
0L, blocks[RBW].getNumBytes()); // expect to be successful
try {
dataSet.createRbw(StorageType.DEFAULT, blocks[RBW], false);
Assert.fail("Should not have created a replica that had created as RBW " +
blocks[RBW]);
} catch (ReplicaAlreadyExistsException e) {
}
try {
dataSet.recoverRbw(blocks[RWR], blocks[RWR].getGenerationStamp()+1,
0L, blocks[RWR].getNumBytes());
Assert.fail("Should not have recovered a RWR replica " + blocks[RWR]);
} catch (ReplicaNotFoundException e) {
Assert.assertTrue(e.getMessage().startsWith(
ReplicaNotFoundException.NON_RBW_REPLICA));
}
try {
dataSet.createRbw(StorageType.DEFAULT, blocks[RWR], false);
Assert.fail("Should not have created a replica that was waiting to be " +
"recovered " + blocks[RWR]);
} catch (ReplicaAlreadyExistsException e) {
}
try {
dataSet.recoverRbw(blocks[RUR], blocks[RUR].getGenerationStamp()+1,
0L, blocks[RUR].getNumBytes());
Assert.fail("Should not have recovered a RUR replica " + blocks[RUR]);
} catch (ReplicaNotFoundException e) {
Assert.assertTrue(e.getMessage().startsWith(
ReplicaNotFoundException.NON_RBW_REPLICA));
}
try {
dataSet.createRbw(StorageType.DEFAULT, blocks[RUR], false);
Assert.fail("Should not have created a replica that was under recovery " +
blocks[RUR]);
} catch (ReplicaAlreadyExistsException e) {
}
try {
dataSet.recoverRbw(blocks[NON_EXISTENT],
blocks[NON_EXISTENT].getGenerationStamp()+1,
0L, blocks[NON_EXISTENT].getNumBytes());
Assert.fail("Cannot recover a non-existent replica " +
blocks[NON_EXISTENT]);
} catch (ReplicaNotFoundException e) {
Assert.assertTrue(
e.getMessage().contains(ReplicaNotFoundException.NON_EXISTENT_REPLICA));
}
dataSet.createRbw(StorageType.DEFAULT, blocks[NON_EXISTENT], false);
}
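  /**
   * Exercise createTemporary() against replicas in each state; only a block
   * that does not already have a replica can be created as temporary.
   */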
private void testWriteToTemporary(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throws IOException {
try {
dataSet.createTemporary(StorageType.DEFAULT, blocks[FINALIZED]);
Assert.fail("Should not have created a temporary replica that was " +
"finalized " + blocks[FINALIZED]);
} catch (ReplicaAlreadyExistsException e) {
}
try {
dataSet.createTemporary(StorageType.DEFAULT, blocks[TEMPORARY]);
Assert.fail("Should not have created a replica that had created as" +
"temporary " + blocks[TEMPORARY]);
} catch (ReplicaAlreadyExistsException e) {
}
try {
dataSet.createTemporary(StorageType.DEFAULT, blocks[RBW]);
Assert.fail("Should not have created a replica that had created as RBW " +
blocks[RBW]);
} catch (ReplicaAlreadyExistsException e) {
}
try {
dataSet.createTemporary(StorageType.DEFAULT, blocks[RWR]);
Assert.fail("Should not have created a replica that was waiting to be " +
"recovered " + blocks[RWR]);
} catch (ReplicaAlreadyExistsException e) {
}
try {
dataSet.createTemporary(StorageType.DEFAULT, blocks[RUR]);
Assert.fail("Should not have created a replica that was under recovery " +
blocks[RUR]);
} catch (ReplicaAlreadyExistsException e) {
}
dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
try {
dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
Assert.fail("Should not have created a replica that had already been "
+ "created " + blocks[NON_EXISTENT]);
} catch (Exception e) {
Assert.assertTrue(
e.getMessage().contains(blocks[NON_EXISTENT].getBlockName()));
Assert.assertTrue(e instanceof ReplicaAlreadyExistsException);
}
long newGenStamp = blocks[NON_EXISTENT].getGenerationStamp() * 10;
blocks[NON_EXISTENT].setGenerationStamp(newGenStamp);
try {
ReplicaInPipelineInterface replicaInfo =
dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]).getReplica();
Assert.assertTrue(replicaInfo.getGenerationStamp() == newGenStamp);
Assert.assertTrue(
replicaInfo.getBlockId() == blocks[NON_EXISTENT].getBlockId());
} catch (ReplicaAlreadyExistsException e) {
Assert.fail("createRbw() Should have removed the block with the older "
+ "genstamp and replaced it with the newer one: " + blocks[NON_EXISTENT]);
}
}
/**
* This is a test to check the replica map before and after the datanode
* quick restart (less than 5 minutes)
* @throws Exception
*/
@Test
public void testReplicaMapAfterDatanodeRestart() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
.build();
try {
cluster.waitActive();
NameNode nn1 = cluster.getNameNode(0);
NameNode nn2 = cluster.getNameNode(1);
assertNotNull("cannot create nn1", nn1);
assertNotNull("cannot create nn2", nn2);
// check number of volumes in fsdataset
DataNode dn = cluster.getDataNodes().get(0);
FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.
getFSDataset(dn);
ReplicaMap replicaMap = dataSet.volumeMap;
List<FsVolumeImpl> volumes = null;
try (FsDatasetSpi.FsVolumeReferences referredVols = dataSet.getFsVolumeReferences()) {
// number of volumes should be 2 - [data1, data2]
assertEquals("number of volumes is wrong", 2, referredVols.size());
volumes = new ArrayList<>(referredVols.size());
for (FsVolumeSpi vol : referredVols) {
volumes.add((FsVolumeImpl) vol);
}
}
ArrayList<String> bpList = new ArrayList<String>(Arrays.asList(
cluster.getNamesystem(0).getBlockPoolId(),
cluster.getNamesystem(1).getBlockPoolId()));
Assert.assertTrue("Cluster should have 2 block pools",
bpList.size() == 2);
createReplicas(bpList, volumes, replicaMap);
ReplicaMap oldReplicaMap = new ReplicaMap(this);
oldReplicaMap.addAll(replicaMap);
cluster.restartDataNode(0);
cluster.waitActive();
dn = cluster.getDataNodes().get(0);
dataSet = (FsDatasetImpl) dn.getFSDataset();
testEqualityOfReplicaMap(oldReplicaMap, dataSet.volumeMap, bpList);
} finally {
cluster.shutdown();
}
}
/**
* Compare the replica map before and after the restart
**/
private void testEqualityOfReplicaMap(ReplicaMap oldReplicaMap, ReplicaMap
newReplicaMap, List<String> bpidList) {
// Traversing through newReplica map and remove the corresponding
// replicaInfo from oldReplicaMap.
for (String bpid: bpidList) {
for (ReplicaInfo info: newReplicaMap.replicas(bpid)) {
assertNotNull("Volume map before restart didn't contain the "
+ "blockpool: " + bpid, oldReplicaMap.replicas(bpid));
ReplicaInfo oldReplicaInfo = oldReplicaMap.get(bpid,
info.getBlockId());
// Every replica present after the restart must also have existed in the
// volume map before the restart.
assertNotNull("Old Replica Map didn't contain block with blockId: " +
info.getBlockId(), oldReplicaInfo);
ReplicaState oldState = oldReplicaInfo.getState();
// After a restart, all RWR, RBW and RUR replicas get converted to RWR.
if (info.getState() == ReplicaState.RWR) {
if (oldState == ReplicaState.RWR || oldState == ReplicaState.RBW
|| oldState == ReplicaState.RUR) {
oldReplicaMap.remove(bpid, oldReplicaInfo);
}
} else if (info.getState() == ReplicaState.FINALIZED &&
oldState == ReplicaState.FINALIZED) {
oldReplicaMap.remove(bpid, oldReplicaInfo);
}
}
}
// TEMPORARY (ReplicaInPipeline) replicas are not persisted across a restart,
// so any non-TEMPORARY replica still left in the old map was lost by the
// restart.
for (String bpid: bpidList) {
for (ReplicaInfo replicaInfo: oldReplicaMap.replicas(bpid)) {
if (replicaInfo.getState() != ReplicaState.TEMPORARY) {
Assert.fail("After datanode restart we lost the block with blockId: "
+ replicaInfo.getBlockId());
}
}
}
}
private void createReplicas(List<String> bpList, List<FsVolumeImpl> volumes,
ReplicaMap volumeMap) throws IOException {
Assert.assertTrue("Volume map can't be null" , volumeMap != null);
// Create one replica of each type for every block pool on every volume
// and add them to the volume map.
long id = 1; // This variable is used as both blockId and genStamp
for (String bpId: bpList) {
for (FsVolumeImpl volume: volumes) {
ReplicaInfo finalizedReplica = new FinalizedReplica(id, 1, id, volume,
DatanodeUtil.idToBlockDir(volume.getFinalizedDir(bpId), id));
volumeMap.add(bpId, finalizedReplica);
id++;
ReplicaInfo rbwReplica = new ReplicaBeingWritten(id, 1, id, volume,
volume.getRbwDir(bpId), null, 100);
volumeMap.add(bpId, rbwReplica);
id++;
ReplicaInfo rwrReplica = new ReplicaWaitingToBeRecovered(id, 1, id,
volume, volume.getRbwDir(bpId));
volumeMap.add(bpId, rwrReplica);
id++;
ReplicaInfo ripReplica = new ReplicaInPipeline(id, id, volume,
volume.getTmpDir(bpId), 0);
volumeMap.add(bpId, ripReplica);
id++;
}
}
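// Create the backing block and meta files so the replicas can be reloaded
// after the datanode restart.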
for (String bpId: bpList) {
for (ReplicaInfo replicaInfo: volumeMap.replicas(bpId)) {
File parentFile = replicaInfo.getBlockFile().getParentFile();
if (!parentFile.exists()) {
if (!parentFile.mkdirs()) {
throw new IOException("Failed to mkdirs " + parentFile);
}
}
replicaInfo.getBlockFile().createNewFile();
replicaInfo.getMetaFile().createNewFile();
}
}
}
}
| 26,424 | 38.38152 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/dtp/Http2ResponseHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.web.dtp;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http2.HttpUtil;
import io.netty.util.concurrent.Promise;
import java.util.HashMap;
import java.util.Map;
public class Http2ResponseHandler extends
SimpleChannelInboundHandler<FullHttpResponse> {
private Map<Integer, Promise<FullHttpResponse>> streamId2Promise =
new HashMap<>();
@Override
protected void channelRead0(ChannelHandlerContext ctx, FullHttpResponse msg)
throws Exception {
Integer streamId =
msg.headers().getInt(HttpUtil.ExtensionHeaderNames.STREAM_ID.text());
if (streamId == null) {
System.err.println("HttpResponseHandler unexpected message received: "
+ msg);
return;
}
if (streamId.intValue() == 1) {
// this is the upgrade response message, just ignore it.
return;
}
Promise<FullHttpResponse> promise;
synchronized (this) {
promise = streamId2Promise.get(streamId);
}
if (promise == null) {
System.err.println("Message received for unknown stream id " + streamId);
} else {
// Complete the promise registered for this stream with the response.
promise.setSuccess(msg.retain());
}
}
public void put(Integer streamId, Promise<FullHttpResponse> promise) {
streamId2Promise.put(streamId, promise);
}
}
| 2,294 | 34.307692 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/dtp/TestDtpHttp2.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.web.dtp;
import static org.junit.Assert.assertEquals;
import io.netty.bootstrap.Bootstrap;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.codec.http.DefaultFullHttpRequest;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpVersion;
import io.netty.handler.codec.http2.DefaultHttp2Connection;
import io.netty.handler.codec.http2.DefaultHttp2FrameReader;
import io.netty.handler.codec.http2.DefaultHttp2FrameWriter;
import io.netty.handler.codec.http2.DelegatingDecompressorFrameListener;
import io.netty.handler.codec.http2.Http2Connection;
import io.netty.handler.codec.http2.Http2ConnectionHandler;
import io.netty.handler.codec.http2.Http2FrameLogger;
import io.netty.handler.codec.http2.Http2FrameReader;
import io.netty.handler.codec.http2.Http2FrameWriter;
import io.netty.handler.codec.http2.Http2InboundFrameLogger;
import io.netty.handler.codec.http2.Http2OutboundFrameLogger;
import io.netty.handler.codec.http2.HttpToHttp2ConnectionHandler;
import io.netty.handler.codec.http2.HttpUtil;
import io.netty.handler.codec.http2.InboundHttp2ToHttpAdapter;
import io.netty.handler.logging.LogLevel;
import io.netty.handler.timeout.TimeoutException;
import io.netty.util.concurrent.Promise;
import java.io.IOException;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.ExecutionException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestDtpHttp2 {
private static final Http2FrameLogger FRAME_LOGGER = new Http2FrameLogger(
LogLevel.INFO, TestDtpHttp2.class);
private static final Configuration CONF = WebHdfsTestUtil.createConf();
private static MiniDFSCluster CLUSTER;
private static final EventLoopGroup WORKER_GROUP = new NioEventLoopGroup();
private static Channel CHANNEL;
private static Http2ResponseHandler RESPONSE_HANDLER;
@BeforeClass
public static void setUp() throws IOException, URISyntaxException,
TimeoutException {
CLUSTER = new MiniDFSCluster.Builder(CONF).numDataNodes(1).build();
CLUSTER.waitActive();
RESPONSE_HANDLER = new Http2ResponseHandler();
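// Assemble a Netty client whose pipeline translates HTTP/1.1-style requests
// into HTTP/2 frames and hands decoded responses to RESPONSE_HANDLER.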
Bootstrap bootstrap =
new Bootstrap()
.group(WORKER_GROUP)
.channel(NioSocketChannel.class)
.remoteAddress("127.0.0.1",
CLUSTER.getDataNodes().get(0).getInfoPort())
.handler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) throws Exception {
Http2Connection connection = new DefaultHttp2Connection(false);
Http2ConnectionHandler connectionHandler =
new HttpToHttp2ConnectionHandler(connection, frameReader(),
frameWriter(), new DelegatingDecompressorFrameListener(
connection, new InboundHttp2ToHttpAdapter.Builder(
connection).maxContentLength(Integer.MAX_VALUE)
.propagateSettings(true).build()));
ch.pipeline().addLast(connectionHandler, RESPONSE_HANDLER);
}
});
CHANNEL = bootstrap.connect().syncUninterruptibly().channel();
}
@AfterClass
public static void tearDown() throws IOException {
if (CHANNEL != null) {
CHANNEL.close().syncUninterruptibly();
}
WORKER_GROUP.shutdownGracefully();
if (CLUSTER != null) {
CLUSTER.shutdown();
}
}
private static Http2FrameReader frameReader() {
return new Http2InboundFrameLogger(new DefaultHttp2FrameReader(),
FRAME_LOGGER);
}
private static Http2FrameWriter frameWriter() {
return new Http2OutboundFrameLogger(new DefaultHttp2FrameWriter(),
FRAME_LOGGER);
}
@Test
public void test() throws InterruptedException, ExecutionException {
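// Client-initiated HTTP/2 streams use odd ids, and Http2ResponseHandler
// treats stream 1 as the upgrade response, so this request goes out on
// stream 3.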
int streamId = 3;
FullHttpRequest request =
new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
request.headers().add(HttpUtil.ExtensionHeaderNames.STREAM_ID.text(),
streamId);
Promise<FullHttpResponse> promise = CHANNEL.eventLoop().newPromise();
synchronized (RESPONSE_HANDLER) {
CHANNEL.writeAndFlush(request);
RESPONSE_HANDLER.put(streamId, promise);
}
assertEquals(HttpResponseStatus.OK, promise.get().status());
ByteBuf content = promise.get().content();
assertEquals("HTTP/2 DTP", content.toString(StandardCharsets.UTF_8));
}
}
| 5,876 | 38.709459 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.web.webhdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HAUtilClient;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.web.resources.DelegationParam;
import org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam;
import org.apache.hadoop.hdfs.web.resources.OffsetParam;
import org.apache.hadoop.security.token.Token;
import org.junit.Assert;
import org.junit.Test;
import io.netty.handler.codec.http.QueryStringDecoder;
import java.io.IOException;
import static org.mockito.Mockito.mock;
public class TestParameterParser {
private static final String LOGICAL_NAME = "minidfs";
@Test
public void testDeserializeHAToken() throws IOException {
Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
final Token<DelegationTokenIdentifier> token = new
Token<DelegationTokenIdentifier>();
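// Build a WebHDFS query carrying the logical nameservice and the serialized
// token; the parsed token should be tagged as belonging to a logical (HA) URI.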
QueryStringDecoder decoder = new QueryStringDecoder(
WebHdfsHandler.WEBHDFS_PREFIX + "/?"
+ NamenodeAddressParam.NAME + "=" + LOGICAL_NAME + "&"
+ DelegationParam.NAME + "=" + token.encodeToUrlString());
ParameterParser testParser = new ParameterParser(decoder, conf);
final Token<DelegationTokenIdentifier> tok2 = testParser.delegationToken();
Assert.assertTrue(HAUtilClient.isTokenForLogicalUri(tok2));
}
@Test
public void testDecodePath() {
final String ESCAPED_PATH = "/test%25+1%26%3Dtest?op=OPEN&foo=bar";
final String EXPECTED_PATH = "/test%+1&=test";
Configuration conf = new Configuration();
QueryStringDecoder decoder = new QueryStringDecoder(
WebHdfsHandler.WEBHDFS_PREFIX + ESCAPED_PATH);
ParameterParser testParser = new ParameterParser(decoder, conf);
Assert.assertEquals(EXPECTED_PATH, testParser.path());
}
@Test
public void testOffset() throws IOException {
final long X = 42;
long offset = new OffsetParam(Long.toString(X)).getOffset();
Assert.assertEquals("OffsetParam: ", X, offset);
offset = new OffsetParam((String) null).getOffset();
Assert.assertEquals("OffsetParam with null should have defaulted to 0", 0, offset);
try {
offset = new OffsetParam("abc").getValue();
Assert.fail("OffsetParam with nondigit value should have thrown IllegalArgumentException");
} catch (IllegalArgumentException iae) {
// Ignore
}
}
}
| 3,293 | 38.214286 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalReplica.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.extdataset;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.Replica;
public class ExternalReplica implements Replica {
@Override
public long getBlockId() {
return 0;
}
@Override
public long getGenerationStamp() {
return 0;
}
@Override
public ReplicaState getState() {
return ReplicaState.FINALIZED;
}
@Override
public long getNumBytes() {
return 0;
}
@Override
public long getBytesOnDisk() {
return 0;
}
@Override
public long getVisibleLength() {
return 0;
}
@Override
public String getStorageUuid() {
return null;
}
@Override
public boolean isOnTransientStorage() {
return false;
}
}
| 1,606 | 23.348485 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.extdataset;
import java.io.File;
import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
public class ExternalVolumeImpl implements FsVolumeSpi {
@Override
public String[] getBlockPoolList() {
return null;
}
@Override
public long getAvailable() throws IOException {
return 0;
}
@Override
public String getBasePath() {
return null;
}
@Override
public String getPath(String bpid) throws IOException {
return null;
}
@Override
public File getFinalizedDir(String bpid) throws IOException {
return null;
}
@Override
public FsVolumeReference obtainReference() throws ClosedChannelException {
return null;
}
@Override
public String getStorageID() {
return null;
}
@Override
public StorageType getStorageType() {
return StorageType.DEFAULT;
}
@Override
public boolean isTransientStorage() {
return false;
}
@Override
public void reserveSpaceForRbw(long bytesToReserve) {
}
@Override
public void releaseReservedSpace(long bytesToRelease) {
}
@Override
public void releaseLockedMemory(long bytesToRelease) {
}
@Override
public BlockIterator newBlockIterator(String bpid, String name) {
return null;
}
@Override
public BlockIterator loadBlockIterator(String bpid, String name)
throws IOException {
return null;
}
@Override
public FsDatasetSpi getDataset() {
return null;
}
}
| 2,557 | 23.361905 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/TestExternalDataset.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.extdataset;
import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.junit.Test;
/**
* Tests the ability to create external FsDatasetSpi implementations.
*
* The purpose of this suite of tests is to ensure that it is possible to
* construct subclasses of FsDatasetSpi outside the Hadoop tree
* (specifically, outside of the org.apache.hadoop.hdfs.server.datanode
* package). This consists of creating subclasses of the two key classes
* (FsDatasetSpi and FsVolumeSpi) *and* instances or subclasses of any
* classes/interfaces their methods need to produce. If methods are added
* to or changed in any superclasses, or if constructors of other classes
* are changed, this package will fail to compile. In fixing this
* compilation error, any new class dependencies should receive the same
* treatment.
*
* It is worth noting what these tests do *not* accomplish. Just as
* important as being able to produce instances of the appropriate classes
* is being able to access all necessary methods on those classes as well
* as on any additional classes accepted as inputs to FsDatasetSpi's
* methods. It wouldn't be correct to mandate all methods be public, as
* that would defeat encapsulation. Moreover, there is no natural
* mechanism that would prevent a manually-constructed list of methods
* from becoming stale. Rather than creating tests with no clear means of
* maintaining them, this problem is left unsolved for now.
*
* Lastly, though merely compiling this package should signal success,
* explicit testInstantiate* unit tests are included below so as to have a
* tangible means of referring to each case.
*/
public class TestExternalDataset {
/**
* Tests instantiating an FsDatasetSpi subclass.
*/
@Test
public void testInstantiateDatasetImpl() throws Throwable {
FsDatasetSpi<?> inst = new ExternalDatasetImpl();
}
/**
* Tests instantiating a Replica subclass.
*/
@Test
public void testInstantiateExternalReplica() throws Throwable {
Replica inst = new ExternalReplica();
}
/**
* Tests instantiating a ReplicaInPipelineInterface subclass.
*/
@Test
public void testInstantiateReplicaInPipeline() throws Throwable {
ReplicaInPipelineInterface inst = new ExternalReplicaInPipeline();
}
/**
* Tests instantiating an FsVolumeSpi subclass.
*/
@Test
public void testInstantiateVolumeImpl() throws Throwable {
FsVolumeSpi inst = new ExternalVolumeImpl();
}
}
| 3,548 | 38.876404 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.extdataset;
import java.io.*;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.*;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetricHelper;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
import org.apache.hadoop.metrics2.MetricsCollector;
public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
private final DatanodeStorage storage = new DatanodeStorage(
DatanodeStorage.generateUuid(), DatanodeStorage.State.NORMAL,
StorageType.DEFAULT);
@Override
public FsVolumeReferences getFsVolumeReferences() {
return null;
}
@Override
public void addVolume(StorageLocation location, List<NamespaceInfo> nsInfos) throws IOException {
}
@Override
public void removeVolumes(Set<File> volumes, boolean clearFailure) {
}
@Override
public DatanodeStorage getStorage(String storageUuid) {
return null;
}
@Override
public StorageReport[] getStorageReports(String bpid) throws IOException {
StorageReport[] result = new StorageReport[1];
result[0] = new StorageReport(storage, false, 0, 0, 0, 0);
return result;
}
@Override
public ExternalVolumeImpl getVolume(ExtendedBlock b) {
return null;
}
@Override
public Map<String, Object> getVolumeInfoMap() {
return null;
}
@Override
public List<FinalizedReplica> getFinalizedBlocks(String bpid) {
return null;
}
@Override
public List<FinalizedReplica> getFinalizedBlocksOnPersistentStorage(String bpid) {
return null;
}
@Override
public void checkAndUpdate(String bpid, long blockId, File diskFile,
File diskMetaFile, FsVolumeSpi vol) {
}
@Override
public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
throws IOException {
return new LengthInputStream(null, 0);
}
@Override
public long getLength(ExtendedBlock b) throws IOException {
return 0;
}
@Override
@Deprecated
public Replica getReplica(String bpid, long blockId) {
return new ExternalReplica();
}
@Override
public String getReplicaString(String bpid, long blockId) {
return null;
}
@Override
public Block getStoredBlock(String bpid, long blkid) throws IOException {
return new Block();
}
@Override
public InputStream getBlockInputStream(ExtendedBlock b, long seekOffset)
throws IOException {
return null;
}
@Override
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
long ckoff) throws IOException {
return new ReplicaInputStreams(null, null, null);
}
@Override
public ReplicaHandler createTemporary(StorageType t, ExtendedBlock b)
throws IOException {
return new ReplicaHandler(new ExternalReplicaInPipeline(), null);
}
@Override
public ReplicaHandler createRbw(StorageType t, ExtendedBlock b, boolean tf)
throws IOException {
return new ReplicaHandler(new ExternalReplicaInPipeline(), null);
}
@Override
public ReplicaHandler recoverRbw(ExtendedBlock b, long newGS,
long minBytesRcvd, long maxBytesRcvd) throws IOException {
return new ReplicaHandler(new ExternalReplicaInPipeline(), null);
}
@Override
public ReplicaInPipelineInterface convertTemporaryToRbw(
ExtendedBlock temporary) throws IOException {
return new ExternalReplicaInPipeline();
}
@Override
public ReplicaHandler append(ExtendedBlock b, long newGS,
long expectedBlockLen) throws IOException {
return new ReplicaHandler(new ExternalReplicaInPipeline(), null);
}
@Override
public ReplicaHandler recoverAppend(ExtendedBlock b, long newGS,
long expectedBlockLen) throws IOException {
return new ReplicaHandler(new ExternalReplicaInPipeline(), null);
}
@Override
public String recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen)
throws IOException {
return null;
}
@Override
public void finalizeBlock(ExtendedBlock b) throws IOException {
}
@Override
public void unfinalizeBlock(ExtendedBlock b) throws IOException {
}
@Override
public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {
final Map<DatanodeStorage, BlockListAsLongs> result =
new HashMap<DatanodeStorage, BlockListAsLongs>();
result.put(storage, BlockListAsLongs.EMPTY);
return result;
}
@Override
public List<Long> getCacheReport(String bpid) {
return null;
}
@Override
public boolean contains(ExtendedBlock block) {
return false;
}
@Override
public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state) throws ReplicaNotFoundException, UnexpectedReplicaStateException, FileNotFoundException, EOFException, IOException {
}
@Override
public boolean isValidBlock(ExtendedBlock b) {
return false;
}
@Override
public boolean isValidRbw(ExtendedBlock b) {
return false;
}
@Override
public void invalidate(String bpid, Block[] invalidBlks) throws IOException {
}
@Override
public void cache(String bpid, long[] blockIds) {
}
@Override
public void uncache(String bpid, long[] blockIds) {
}
@Override
public boolean isCached(String bpid, long blockId) {
return false;
}
@Override
public Set<File> checkDataDir() {
return null;
}
@Override
public void shutdown() {
}
@Override
public void adjustCrcChannelPosition(ExtendedBlock b,
ReplicaOutputStreams outs, int checksumSize) throws IOException {
}
@Override
public boolean hasEnoughResource() {
return false;
}
@Override
public long getReplicaVisibleLength(ExtendedBlock block) throws IOException {
return 0;
}
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
throws IOException {
return new ReplicaRecoveryInfo(0, 0, 0, ReplicaState.FINALIZED);
}
@Override
public String updateReplicaUnderRecovery(ExtendedBlock oldBlock,
long recoveryId, long newBlockId, long newLength) throws IOException {
return null;
}
@Override
public void addBlockPool(String bpid, Configuration conf) throws IOException {
}
@Override
public void shutdownBlockPool(String bpid) {
}
@Override
public void deleteBlockPool(String bpid, boolean force) throws IOException {
}
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b)
throws IOException {
return new BlockLocalPathInfo(null, "file", "metafile");
}
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(String bpid, long[] blockIds) throws IOException {
return new HdfsBlocksMetadata(null, null, null, null);
}
@Override
public void enableTrash(String bpid) {
}
@Override
public void clearTrash(String bpid) {
}
@Override
public boolean trashEnabled(String bpid) {
return false;
}
@Override
public void setRollingUpgradeMarker(String bpid) throws IOException {
}
@Override
public void clearRollingUpgradeMarker(String bpid) throws IOException {
}
@Override
public void submitBackgroundSyncFileRangeRequest(ExtendedBlock block, FileDescriptor fd, long offset, long nbytes, int flags) {
}
@Override
public void onCompleteLazyPersist(String bpId, long blockId, long creationTime, File[] savedFiles, ExternalVolumeImpl targetVolume) {
}
@Override
public void onFailLazyPersist(String bpId, long blockId) {
}
@Override
public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block, StorageType targetStorageType) throws IOException {
return null;
}
@Override
public long getBlockPoolUsed(String bpid) throws IOException {
return 0;
}
@Override
public long getDfsUsed() throws IOException {
return 0;
}
@Override
public long getCapacity() {
return 0;
}
@Override
public long getRemaining() throws IOException {
return 0;
}
@Override
public String getStorageInfo() {
return null;
}
@Override
public int getNumFailedVolumes() {
return 0;
}
@Override
public String[] getFailedStorageLocations() {
return null;
}
@Override
public long getLastVolumeFailureDate() {
return 0;
}
@Override
public long getEstimatedCapacityLostTotal() {
return 0;
}
@Override
public VolumeFailureSummary getVolumeFailureSummary() {
return null;
}
@Override
public long getCacheUsed() {
return 0;
}
@Override
public long getCacheCapacity() {
return 0;
}
@Override
public long getNumBlocksCached() {
return 0;
}
@Override
public long getNumBlocksFailedToCache() {
return 0;
}
@Override
public long getNumBlocksFailedToUncache() {
return 0;
}
/**
* Get metrics from the metrics source
*
* @param collector to contain the resulting metrics snapshot
* @param all if true, return all metrics even if unchanged.
*/
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
try {
DataNodeMetricHelper.getMetrics(collector, this, "ExternalDataset");
} catch (Exception e){
//ignore exceptions
}
}
@Override
public void setPinning(ExtendedBlock block) throws IOException {
}
@Override
public boolean getPinning(ExtendedBlock block) throws IOException {
return false;
}
@Override
public boolean isDeletingBlock(String bpid, long blockId) {
return false;
}
}
| 11,428 | 24.285398 | 194 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalReplicaInPipeline.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.extdataset;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.ChunkChecksum;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
import org.apache.hadoop.util.DataChecksum;
public class ExternalReplicaInPipeline implements ReplicaInPipelineInterface {
@Override
public void setNumBytes(long bytesReceived) {
}
@Override
public long getBytesAcked() {
return 0;
}
@Override
public void setBytesAcked(long bytesAcked) {
}
@Override
public void releaseAllBytesReserved() {
}
@Override
public void setLastChecksumAndDataLen(long dataLength, byte[] lastChecksum) {
}
@Override
public ChunkChecksum getLastChecksumAndDataLen() {
return new ChunkChecksum(0, null);
}
@Override
public ReplicaOutputStreams createStreams(boolean isCreate,
DataChecksum requestedChecksum) throws IOException {
return new ReplicaOutputStreams(null, null, requestedChecksum, false);
}
@Override
public OutputStream createRestartMetaStream() throws IOException {
return null;
}
@Override
public long getBlockId() {
return 0;
}
@Override
public long getGenerationStamp() {
return 0;
}
@Override
public ReplicaState getState() {
return ReplicaState.FINALIZED;
}
@Override
public long getNumBytes() {
return 0;
}
@Override
public long getBytesOnDisk() {
return 0;
}
@Override
public long getVisibleLength() {
return 0;
}
@Override
public String getStorageUuid() {
return null;
}
@Override
public boolean isOnTransientStorage() {
return false;
}
}
| 2,668 | 23.486239 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.junit.Before;
import org.junit.Test;
public class TestHost2NodesMap {
private final Host2NodesMap map = new Host2NodesMap();
private DatanodeDescriptor dataNodes[];
@Before
public void setup() {
dataNodes = new DatanodeDescriptor[] {
DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
DFSTestUtil.getDatanodeDescriptor("3.3.3.3", 5021, "/d1/r2"),
};
for (DatanodeDescriptor node : dataNodes) {
map.add(node);
}
map.add(null);
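// Adding null should be a no-op; contains(null) and remove(null) below are
// expected to return false.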
}
@Test
public void testContains() throws Exception {
DatanodeDescriptor nodeNotInMap =
DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
for (int i = 0; i < dataNodes.length; i++) {
assertTrue(map.contains(dataNodes[i]));
}
assertFalse(map.contains(null));
assertFalse(map.contains(nodeNotInMap));
}
@Test
public void testGetDatanodeByHost() throws Exception {
assertEquals(map.getDatanodeByHost("1.1.1.1"), dataNodes[0]);
assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
assertTrue(node == dataNodes[2] || node == dataNodes[3]);
assertNull(map.getDatanodeByHost("4.4.4.4"));
}
@Test
public void testRemove() throws Exception {
DatanodeDescriptor nodeNotInMap =
DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
assertFalse(map.remove(nodeNotInMap));
assertTrue(map.remove(dataNodes[0]));
assertTrue(map.getDatanodeByHost("1.1.1.1.")==null);
assertTrue(map.getDatanodeByHost("2.2.2.2")==dataNodes[1]);
DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
assertTrue(node==dataNodes[2] || node==dataNodes[3]);
assertNull(map.getDatanodeByHost("4.4.4.4"));
assertTrue(map.remove(dataNodes[2]));
assertNull(map.getDatanodeByHost("1.1.1.1"));
assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
assertEquals(map.getDatanodeByHost("3.3.3.3"), dataNodes[3]);
assertTrue(map.remove(dataNodes[3]));
assertNull(map.getDatanodeByHost("1.1.1.1"));
assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
assertNull(map.getDatanodeByHost("3.3.3.3"));
assertFalse(map.remove(null));
assertTrue(map.remove(dataNodes[1]));
assertFalse(map.remove(dataNodes[1]));
}
}
| 3,547 | 35.57732 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_FULL_BLOCK_REPORT_LEASES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_FULL_BLOCK_REPORT_LEASE_LENGTH_MS;
import com.google.common.base.Joiner;
import com.google.common.base.Supplier;
import com.google.common.util.concurrent.Uninterruptibles;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.After;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
public class TestBlockReportRateLimiting {
static final Log LOG = LogFactory.getLog(TestBlockReportRateLimiting.class);
private static void setFailure(AtomicReference<String> failure,
String what) {
failure.compareAndSet("", what);
LOG.error("Test error: " + what);
}
@After
public void restoreNormalBlockManagerFaultInjector() {
BlockManagerFaultInjector.instance = new BlockManagerFaultInjector();
}
@BeforeClass
public static void raiseBlockManagerLogLevels() {
GenericTestUtils.setLogLevel(BlockManager.LOG, Level.ALL);
GenericTestUtils.setLogLevel(BlockReportLeaseManager.LOG, Level.ALL);
}
@Test(timeout=180000)
public void testRateLimitingDuringDataNodeStartup() throws Exception {
Configuration conf = new Configuration();
conf.setInt(DFS_NAMENODE_MAX_FULL_BLOCK_REPORT_LEASES, 1);
conf.setLong(DFS_NAMENODE_FULL_BLOCK_REPORT_LEASE_LENGTH_MS,
20L * 60L * 1000L);
final Semaphore fbrSem = new Semaphore(0);
final HashSet<DatanodeID> expectedFbrDns = new HashSet<>();
final HashSet<DatanodeID> fbrDns = new HashSet<>();
final AtomicReference<String> failure = new AtomicReference<String>("");
final BlockManagerFaultInjector injector = new BlockManagerFaultInjector() {
private int numLeases = 0;
@Override
public void incomingBlockReportRpc(DatanodeID nodeID,
BlockReportContext context) throws IOException {
LOG.info("Incoming full block report from " + nodeID +
". Lease ID = 0x" + Long.toHexString(context.getLeaseId()));
if (context.getLeaseId() == 0) {
setFailure(failure, "Got unexpected rate-limiting-" +
"bypassing full block report RPC from " + nodeID);
}
fbrSem.acquireUninterruptibly();
synchronized (this) {
fbrDns.add(nodeID);
if (!expectedFbrDns.remove(nodeID)) {
setFailure(failure, "Got unexpected full block report " +
"RPC from " + nodeID + ". expectedFbrDns = " +
Joiner.on(", ").join(expectedFbrDns));
}
LOG.info("Proceeding with full block report from " +
nodeID + ". Lease ID = 0x" +
Long.toHexString(context.getLeaseId()));
}
}
@Override
public void requestBlockReportLease(DatanodeDescriptor node,
long leaseId) {
if (leaseId == 0) {
return;
}
synchronized (this) {
numLeases++;
expectedFbrDns.add(node);
LOG.info("requestBlockReportLease(node=" + node +
", leaseId=0x" + Long.toHexString(leaseId) + "). " +
"expectedFbrDns = " + Joiner.on(", ").join(expectedFbrDns));
if (numLeases > 1) {
setFailure(failure, "More than 1 lease was issued at once.");
}
}
}
@Override
public void removeBlockReportLease(DatanodeDescriptor node, long leaseId) {
LOG.info("removeBlockReportLease(node=" + node +
", leaseId=0x" + Long.toHexString(leaseId) + ")");
synchronized (this) {
numLeases--;
}
}
};
BlockManagerFaultInjector.instance = injector;
final int NUM_DATANODES = 5;
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
cluster.waitActive();
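// Release one full block report at a time and verify that no more than n
// datanodes have managed to report.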
for (int n = 1; n <= NUM_DATANODES; n++) {
LOG.info("Waiting for " + n + " datanode(s) to report in.");
fbrSem.release();
Uninterruptibles.sleepUninterruptibly(20, TimeUnit.MILLISECONDS);
final int currentN = n;
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
synchronized (injector) {
if (fbrDns.size() > currentN) {
setFailure(failure, "Expected at most " + currentN +
" datanodes to have sent a block report, but actually " +
fbrDns.size() + " have.");
}
return (fbrDns.size() >= currentN);
}
}
}, 25, 50000);
}
cluster.shutdown();
Assert.assertEquals("", failure.get());
}
/**
* Start a 2-node cluster with only one block report lease. When the
* first datanode gets a lease, kill it. Then wait for the lease to
* expire, and the second datanode to send a full block report.
*/
@Test(timeout=180000)
public void testLeaseExpiration() throws Exception {
Configuration conf = new Configuration();
conf.setInt(DFS_NAMENODE_MAX_FULL_BLOCK_REPORT_LEASES, 1);
conf.setLong(DFS_NAMENODE_FULL_BLOCK_REPORT_LEASE_LENGTH_MS, 100L);
final Semaphore gotFbrSem = new Semaphore(0);
final AtomicReference<String> failure = new AtomicReference<>();
final AtomicReference<MiniDFSCluster> cluster =
new AtomicReference<>();
final AtomicReference<String> datanodeToStop = new AtomicReference<>();
final BlockManagerFaultInjector injector = new BlockManagerFaultInjector() {
@Override
public void incomingBlockReportRpc(DatanodeID nodeID,
BlockReportContext context) throws IOException {
if (context.getLeaseId() == 0) {
setFailure(failure, "Got unexpected rate-limiting-" +
"bypassing full block report RPC from " + nodeID);
}
if (nodeID.getXferAddr().equals(datanodeToStop.get())) {
throw new IOException("Injecting failure into block " +
"report RPC for " + nodeID);
}
gotFbrSem.release();
}
@Override
public void requestBlockReportLease(DatanodeDescriptor node,
long leaseId) {
if (leaseId == 0) {
return;
}
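// Remember the first datanode that is granted a lease; the test stops it
// and waits for its lease to expire.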
datanodeToStop.compareAndSet(null, node.getXferAddr());
}
@Override
public void removeBlockReportLease(DatanodeDescriptor node, long leaseId) {
}
};
try {
BlockManagerFaultInjector.instance = injector;
cluster.set(new MiniDFSCluster.Builder(conf).numDataNodes(2).build());
cluster.get().waitActive();
Assert.assertNotNull(cluster.get().stopDataNode(datanodeToStop.get()));
gotFbrSem.acquire();
Assert.assertNull(failure.get());
} finally {
if (cluster.get() != null) {
cluster.get().shutdown();
}
}
}
}
| 8,394 | 36.986425 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockId.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.junit.Test;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
/**
* Tests the sequential block ID generation mechanism and block ID
* collision handling.
*/
public class TestSequentialBlockId {
private static final Log LOG = LogFactory.getLog("TestSequentialBlockId");
final int BLOCK_SIZE = 1024;
final int IO_SIZE = BLOCK_SIZE;
final short REPLICATION = 1;
final long SEED = 0;
/**
* Test that block IDs are generated sequentially.
*
* @throws IOException
*/
@Test
public void testBlockIdGeneration() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
// Create a file that is 10 blocks long.
Path path = new Path("testBlockIdGeneration.dat");
DFSTestUtil.createFile(
fs, path, IO_SIZE, BLOCK_SIZE * 10, BLOCK_SIZE, REPLICATION, SEED);
List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs, path);
LOG.info("Block0 id is " + blocks.get(0).getBlock().getBlockId());
long nextBlockExpectedId = blocks.get(0).getBlock().getBlockId() + 1;
// Ensure that the block IDs are sequentially increasing.
for (int i = 1; i < blocks.size(); ++i) {
long nextBlockId = blocks.get(i).getBlock().getBlockId();
LOG.info("Block" + i + " id is " + nextBlockId);
assertThat(nextBlockId, is(nextBlockExpectedId));
++nextBlockExpectedId;
}
} finally {
cluster.shutdown();
}
}
/**
* Test that collisions in the block ID space are handled gracefully.
*
* @throws IOException
*/
@Test
public void testTriggerBlockIdCollision() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
FSNamesystem fsn = cluster.getNamesystem();
final int blockCount = 10;
// Create a file with a few blocks to rev up the global block ID
// counter.
Path path1 = new Path("testBlockIdCollisionDetection_file1.dat");
DFSTestUtil.createFile(
fs, path1, IO_SIZE, BLOCK_SIZE * blockCount,
BLOCK_SIZE, REPLICATION, SEED);
List<LocatedBlock> blocks1 = DFSTestUtil.getAllBlocks(fs, path1);
// Rewind the block ID counter in the name system object. This will result
// in block ID collisions when we try to allocate new blocks.
SequentialBlockIdGenerator blockIdGenerator = fsn.getBlockIdManager()
.getBlockIdGenerator();
blockIdGenerator.setCurrentValue(blockIdGenerator.getCurrentValue() - 5);
// Trigger collisions by creating a new file.
Path path2 = new Path("testBlockIdCollisionDetection_file2.dat");
DFSTestUtil.createFile(
fs, path2, IO_SIZE, BLOCK_SIZE * blockCount,
BLOCK_SIZE, REPLICATION, SEED);
List<LocatedBlock> blocks2 = DFSTestUtil.getAllBlocks(fs, path2);
assertThat(blocks2.size(), is(blockCount));
// Make sure that file2 block IDs start immediately after file1
assertThat(blocks2.get(0).getBlock().getBlockId(),
is(blocks1.get(9).getBlock().getBlockId() + 1));
} finally {
cluster.shutdown();
}
}
/**
* Test that the block type (legacy or not) can be correctly detected
* based on its generation stamp.
*
* @throws IOException
*/
@Test
public void testBlockTypeDetection() throws IOException {
// Setup a mock object and stub out a few routines to
// retrieve the generation stamp counters.
BlockIdManager bid = mock(BlockIdManager.class);
final long maxGenStampForLegacyBlocks = 10000;
when(bid.getGenerationStampV1Limit())
.thenReturn(maxGenStampForLegacyBlocks);
Block legacyBlock = spy(new Block());
when(legacyBlock.getGenerationStamp())
.thenReturn(maxGenStampForLegacyBlocks/2);
Block newBlock = spy(new Block());
when(newBlock.getGenerationStamp())
.thenReturn(maxGenStampForLegacyBlocks+1);
// Make sure that isLegacyBlock() can correctly detect
// legacy and new blocks.
when(bid.isLegacyBlock(any(Block.class))).thenCallRealMethod();
assertThat(bid.isLegacyBlock(legacyBlock), is(true));
assertThat(bid.isLegacyBlock(newBlock), is(false));
}
/**
* Test that the generation stamp for legacy and new blocks is updated
* as expected.
*
* @throws IOException
*/
@Test
public void testGenerationStampUpdate() throws IOException {
// Setup a mock object and stub out a few routines to
// retrieve the generation stamp counters.
BlockIdManager bid = mock(BlockIdManager.class);
final long nextGenerationStampV1 = 5000;
final long nextGenerationStampV2 = 20000;
when(bid.getNextGenerationStampV1())
.thenReturn(nextGenerationStampV1);
when(bid.getNextGenerationStampV2())
.thenReturn(nextGenerationStampV2);
// Make sure that the generation stamp is set correctly for both
// kinds of blocks.
when(bid.nextGenerationStamp(anyBoolean())).thenCallRealMethod();
assertThat(bid.nextGenerationStamp(true), is(nextGenerationStampV1));
assertThat(bid.nextGenerationStamp(false), is(nextGenerationStampV2));
}
}
| 7,087 | 34.79798 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.util.Time;
import org.junit.Test;
/**
* This class provides tests for BlockInfoUnderConstruction class
*/
public class TestBlockInfoUnderConstruction {
@Test
public void testInitializeBlockRecovery() throws Exception {
DatanodeStorageInfo s1 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.1", "s1");
DatanodeDescriptor dd1 = s1.getDatanodeDescriptor();
DatanodeStorageInfo s2 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.2", "s2");
DatanodeDescriptor dd2 = s2.getDatanodeDescriptor();
DatanodeStorageInfo s3 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.3", "s3");
DatanodeDescriptor dd3 = s3.getDatanodeDescriptor();
dd1.isAlive = dd2.isAlive = dd3.isAlive = true;
BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP),
(short) 3,
BlockUCState.UNDER_CONSTRUCTION,
new DatanodeStorageInfo[] {s1, s2, s3});
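// initializeBlockRecovery picks as primary the replica whose datanode has
// the most recent heartbeat, so the recovery command should show up on that
// datanode.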
// Recovery attempt #1.
DFSTestUtil.resetLastUpdatesWithOffset(dd1, -3 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd3, -2 * 1000);
blockInfo.initializeBlockRecovery(1);
BlockInfoContiguousUnderConstruction[] blockInfoRecovery = dd2.getLeaseRecoveryCommand(1);
assertEquals(blockInfoRecovery[0], blockInfo);
// Recovery attempt #2.
DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd3, -3 * 1000);
blockInfo.initializeBlockRecovery(2);
blockInfoRecovery = dd1.getLeaseRecoveryCommand(1);
assertEquals(blockInfoRecovery[0], blockInfo);
// Recovery attempt #3.
DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd3, -3 * 1000);
blockInfo.initializeBlockRecovery(3);
blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
assertEquals(blockInfoRecovery[0], blockInfo);
// Recovery attempt #4.
// Reset everything. And again pick DN with most recent heart beat.
DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
blockInfo.initializeBlockRecovery(3);
blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
assertEquals(blockInfoRecovery[0], blockInfo);
}
}
| 3,680 | 43.349398 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceBlockPlacementPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.File;
import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.test.PathUtils;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestAvailableSpaceBlockPlacementPolicy {
private final static int numRacks = 4;
private final static int nodesPerRack = 5;
private final static int blockSize = 1024;
private final static int chooseTimes = 10000;
private final static String file = "/tobers/test";
private final static int replica = 3;
private static DatanodeStorageInfo[] storages;
private static DatanodeDescriptor[] dataNodes;
private static Configuration conf;
private static NameNode namenode;
private static BlockPlacementPolicy placementPolicy;
private static NetworkTopology cluster;
@BeforeClass
public static void setupCluster() throws Exception {
conf = new HdfsConfiguration();
conf.setFloat(
DFSConfigKeys.DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY,
0.6f);
String[] racks = new String[numRacks];
for (int i = 0; i < numRacks; i++) {
racks[i] = "/rack" + i;
}
    String[] ownerRackOfNodes = new String[numRacks * nodesPerRack];
    for (int i = 0; i < nodesPerRack; i++) {
      for (int j = 0; j < numRacks; j++) {
        ownerRackOfNodes[i * numRacks + j] = racks[j];
      }
    }
    storages = DFSTestUtil.createDatanodeStorageInfos(ownerRackOfNodes);
dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
File baseDir = PathUtils.getTestDir(AvailableSpaceBlockPlacementPolicy.class);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(baseDir, "name").getPath());
conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
AvailableSpaceBlockPlacementPolicy.class.getName());
DFSTestUtil.formatNameNode(conf);
namenode = new NameNode(conf);
final BlockManager bm = namenode.getNamesystem().getBlockManager();
placementPolicy = bm.getBlockPlacementPolicy();
cluster = bm.getDatanodeManager().getNetworkTopology();
for (int i = 0; i < nodesPerRack * numRacks; i++) {
cluster.add(dataNodes[i]);
}
setupDataNodeCapacity();
}
private static void updateHeartbeatWithUsage(DatanodeDescriptor dn,
long capacity, long dfsUsed, long remaining, long blockPoolUsed,
long dnCacheCapacity, long dnCacheUsed, int xceiverCount,
int volFailures) {
dn.getStorageInfos()[0].setUtilizationForTesting(
capacity, dfsUsed, remaining, blockPoolUsed);
dn.updateHeartbeat(
BlockManagerTestUtil.getStorageReportsForDatanode(dn),
dnCacheCapacity, dnCacheUsed, xceiverCount, volFailures, null);
}
private static void setupDataNodeCapacity() {
for (int i = 0; i < nodesPerRack * numRacks; i++) {
if ((i % 2) == 0) {
// remaining 100%
updateHeartbeatWithUsage(dataNodes[i], 2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
0L, 2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize, 0L, 0L, 0L, 0, 0);
} else {
// remaining 50%
updateHeartbeatWithUsage(dataNodes[i], 2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize, HdfsServerConstants.MIN_BLOCKS_FOR_WRITE
* blockSize, 0L, 0L, 0L, 0, 0);
}
}
}
/*
   * Verify that the BlockPlacementPolicy can be replaced by
   * AvailableSpaceBlockPlacementPolicy by changing the configuration.
*/
@Test
public void testPolicyReplacement() {
Assert.assertTrue((placementPolicy instanceof AvailableSpaceBlockPlacementPolicy));
}
/*
   * Call chooseTarget() many times and verify that nodes with a higher
   * remaining-space percentage are chosen with higher probability.
*/
@Test
public void testChooseTarget() {
int total = 0;
int moreRemainingNode = 0;
for (int i = 0; i < chooseTimes; i++) {
DatanodeStorageInfo[] targets =
namenode
.getNamesystem()
.getBlockManager()
.getBlockPlacementPolicy()
.chooseTarget(file, replica, null, new ArrayList<DatanodeStorageInfo>(), false, null,
blockSize, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
Assert.assertTrue(targets.length == replica);
for (int j = 0; j < replica; j++) {
total++;
if (targets[j].getDatanodeDescriptor().getRemainingPercent() > 60) {
moreRemainingNode++;
}
}
}
Assert.assertTrue(total == replica * chooseTimes);
    double probability = 1.0 * moreRemainingNode / total;
    Assert.assertTrue(probability > 0.52);
    Assert.assertTrue(probability < 0.55);
}
@AfterClass
public static void teardownCluster() {
if (namenode != null) {
namenode.stop();
}
}
}
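
// Back-of-the-envelope check of the 0.52..0.55 assertion window above. If the policy is modeled
// as "draw two random candidates and keep the one with more remaining space with probability
// equal to the configured preference fraction" (0.6 here), then with half the nodes at 100%
// remaining and half at 50% the expected share of choices landing on a roomier node is
// 0.25*1 + 0.5*0.6 + 0.25*0 = 0.55; rack constraints in the real chooser push the observed value
// slightly lower, hence the 0.52 lower bound. This is a simplified model, not the production code.
class AvailableSpacePreferenceSketch {
  public static void main(String[] args) {
    java.util.Random rnd = new java.util.Random(0xdeadbeefL);
    final int nodes = 20;            // 4 racks * 5 nodes, as in the test
    final double preference = 0.6;   // balanced-space preference fraction
    final int trials = 1_000_000;
    int roomierChosen = 0;
    for (int i = 0; i < trials; i++) {
      boolean aRoomy = rnd.nextInt(nodes) % 2 == 0;  // even indexes have 100% remaining
      boolean bRoomy = rnd.nextInt(nodes) % 2 == 0;
      boolean pickRoomy;
      if (aRoomy == bRoomy) {
        pickRoomy = aRoomy;                          // both candidates in the same class
      } else {
        pickRoomy = rnd.nextDouble() < preference;   // prefer the roomier one 60% of the time
      }
      if (pickRoomy) {
        roomierChosen++;
      }
    }
    System.out.printf("share on roomier nodes ~= %.3f (expected 0.55)%n",
        (double) roomierChosen / trials);
  }
}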
| 6,283 | 36.404762 | 111 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlockQueues.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import org.apache.hadoop.hdfs.protocol.Block;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
public class TestUnderReplicatedBlockQueues {
private BlockInfo genBlockInfo(long id) {
return new BlockInfoContiguous(new Block(id), (short) 3);
}
/**
* Test that adding blocks with different replication counts puts them
* into different queues
* @throws Throwable if something goes wrong
*/
@Test
public void testBlockPriorities() throws Throwable {
UnderReplicatedBlocks queues = new UnderReplicatedBlocks();
BlockInfo block1 = genBlockInfo(1);
BlockInfo block2 = genBlockInfo(2);
BlockInfo block_very_under_replicated = genBlockInfo(3);
BlockInfo block_corrupt = genBlockInfo(4);
BlockInfo block_corrupt_repl_one = genBlockInfo(5);
//add a block with a single entry
assertAdded(queues, block1, 1, 0, 3);
assertEquals(1, queues.getUnderReplicatedBlockCount());
assertEquals(1, queues.size());
assertInLevel(queues, block1, UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY);
//repeated additions fail
assertFalse(queues.add(block1, 1, 0, 3));
//add a second block with two replicas
assertAdded(queues, block2, 2, 0, 3);
assertEquals(2, queues.getUnderReplicatedBlockCount());
assertEquals(2, queues.size());
assertInLevel(queues, block2, UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED);
//now try to add a block that is corrupt
assertAdded(queues, block_corrupt, 0, 0, 3);
assertEquals(3, queues.size());
assertEquals(2, queues.getUnderReplicatedBlockCount());
assertEquals(1, queues.getCorruptBlockSize());
assertInLevel(queues, block_corrupt,
UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
//insert a very under-replicated block
assertAdded(queues, block_very_under_replicated, 4, 0, 25);
assertInLevel(queues, block_very_under_replicated,
UnderReplicatedBlocks.QUEUE_VERY_UNDER_REPLICATED);
//insert a corrupt block with replication factor 1
assertAdded(queues, block_corrupt_repl_one, 0, 0, 1);
assertEquals(2, queues.getCorruptBlockSize());
assertEquals(1, queues.getCorruptReplOneBlockSize());
queues.update(block_corrupt_repl_one, 0, 0, 3, 0, 2);
assertEquals(0, queues.getCorruptReplOneBlockSize());
queues.update(block_corrupt, 0, 0, 1, 0, -2);
assertEquals(1, queues.getCorruptReplOneBlockSize());
queues.update(block_very_under_replicated, 0, 0, 1, -4, -24);
assertEquals(2, queues.getCorruptReplOneBlockSize());
}
private void assertAdded(UnderReplicatedBlocks queues,
BlockInfo block,
int curReplicas,
int decomissionedReplicas,
int expectedReplicas) {
assertTrue("Failed to add " + block,
queues.add(block,
curReplicas,
decomissionedReplicas,
expectedReplicas));
}
/**
* Determine whether or not a block is in a level without changing the API.
   * Instead get the per-level iterator and run through it looking for a match.
* If the block is not found, an assertion is thrown.
*
* This is inefficient, but this is only a test case.
* @param queues queues to scan
* @param block block to look for
* @param level level to select
*/
private void assertInLevel(UnderReplicatedBlocks queues,
Block block,
int level) {
UnderReplicatedBlocks.BlockIterator bi = queues.iterator(level);
while (bi.hasNext()) {
Block next = bi.next();
if (block.equals(next)) {
return;
}
}
fail("Block " + block + " not found in level " + level);
}
}
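
// A simplified restatement, using plain ints, of the priority rule the assertions above imply:
// no live replicas -> corrupt queue (or highest priority if decommissioned copies still exist),
// exactly one live replica -> highest priority, fewer than a third of the expected replicas ->
// very under-replicated, otherwise -> under-replicated. This mirrors the test's expectations and
// is not a copy of UnderReplicatedBlocks.getPriority(); treat the thresholds and constant values
// as an approximation for illustration.
class UnderReplicationPrioritySketch {
  static final int QUEUE_HIGHEST_PRIORITY = 0;
  static final int QUEUE_VERY_UNDER_REPLICATED = 1;
  static final int QUEUE_UNDER_REPLICATED = 2;
  static final int QUEUE_WITH_CORRUPT_BLOCKS = 4;

  static int priority(int curReplicas, int decommissionedReplicas, int expectedReplicas) {
    if (curReplicas == 0) {
      // No live copies left: recover from decommissioned copies if any, otherwise it is corrupt.
      return decommissionedReplicas > 0 ? QUEUE_HIGHEST_PRIORITY : QUEUE_WITH_CORRUPT_BLOCKS;
    } else if (curReplicas == 1) {
      return QUEUE_HIGHEST_PRIORITY;       // a single remaining replica is the most urgent case
    } else if (curReplicas * 3 < expectedReplicas) {
      return QUEUE_VERY_UNDER_REPLICATED;  // e.g. 4 live out of 25 expected, as asserted above
    } else {
      return QUEUE_UNDER_REPLICATED;       // e.g. 2 live out of 3 expected
    }
  }
}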
| 4,800 | 38.03252 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertEquals;
import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.util.Time;
import org.junit.Test;
/**
 * Test that FSNamesystem handles heartbeats correctly.
*/
public class TestHeartbeatHandling {
/**
   * Test that
   * {@link FSNamesystem#handleHeartbeat}
   * picks up pending replication and/or invalidation requests and observes
   * the per-heartbeat limits on the number of commands returned.
*/
@Test
public void testHeartbeat() throws Exception {
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
final FSNamesystem namesystem = cluster.getNamesystem();
final HeartbeatManager hm = namesystem.getBlockManager(
).getDatanodeManager().getHeartbeatManager();
final String poolId = namesystem.getBlockPoolId();
final DatanodeRegistration nodeReg =
DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
final String storageID = DatanodeStorage.generateUuid();
dd.updateStorage(new DatanodeStorage(storageID));
final int REMAINING_BLOCKS = 1;
final int MAX_REPLICATE_LIMIT =
conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 2);
final int MAX_INVALIDATE_LIMIT = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
final int MAX_INVALIDATE_BLOCKS = 2*MAX_INVALIDATE_LIMIT+REMAINING_BLOCKS;
final int MAX_REPLICATE_BLOCKS = 2*MAX_REPLICATE_LIMIT+REMAINING_BLOCKS;
final DatanodeStorageInfo[] ONE_TARGET = {dd.getStorageInfo(storageID)};
try {
namesystem.writeLock();
synchronized(hm) {
for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
dd.addBlockToBeReplicated(
new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP),
ONE_TARGET);
}
DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd,
namesystem).getCommands();
assertEquals(1, cmds.length);
assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
for (int i=0; i<MAX_INVALIDATE_BLOCKS; i++) {
blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
}
dd.addBlocksToBeInvalidated(blockList);
cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
.getCommands();
assertEquals(2, cmds.length);
assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
.getCommands();
assertEquals(2, cmds.length);
assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
.getCommands();
assertEquals(1, cmds.length);
assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
.getCommands();
assertEquals(0, cmds.length);
}
} finally {
namesystem.writeUnlock();
}
} finally {
cluster.shutdown();
}
}
/**
* Test if
* {@link FSNamesystem#handleHeartbeat}
* correctly selects data node targets for block recovery.
*/
@Test
public void testHeartbeatBlockRecovery() throws Exception {
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
cluster.waitActive();
final FSNamesystem namesystem = cluster.getNamesystem();
final HeartbeatManager hm = namesystem.getBlockManager(
).getDatanodeManager().getHeartbeatManager();
final String poolId = namesystem.getBlockPoolId();
final DatanodeRegistration nodeReg1 =
DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
final DatanodeDescriptor dd1 = NameNodeAdapter.getDatanode(namesystem, nodeReg1);
dd1.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
final DatanodeRegistration nodeReg2 =
DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(1), poolId);
final DatanodeDescriptor dd2 = NameNodeAdapter.getDatanode(namesystem, nodeReg2);
dd2.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
final DatanodeRegistration nodeReg3 =
DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(2), poolId);
final DatanodeDescriptor dd3 = NameNodeAdapter.getDatanode(namesystem, nodeReg3);
dd3.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
try {
namesystem.writeLock();
synchronized(hm) {
NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem);
NameNodeAdapter.sendHeartBeat(nodeReg2, dd2, namesystem);
NameNodeAdapter.sendHeartBeat(nodeReg3, dd3, namesystem);
// Test with all alive nodes.
DFSTestUtil.resetLastUpdatesWithOffset(dd1, 0);
DFSTestUtil.resetLastUpdatesWithOffset(dd2, 0);
DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
final DatanodeStorageInfo[] storages = {
dd1.getStorageInfos()[0],
dd2.getStorageInfos()[0],
dd3.getStorageInfos()[0]};
BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3,
BlockUCState.UNDER_RECOVERY, storages);
dd1.addBlockToBeRecovered(blockInfo);
DatanodeCommand[] cmds =
NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
assertEquals(1, cmds.length);
assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
BlockRecoveryCommand recoveryCommand = (BlockRecoveryCommand)cmds[0];
assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
DatanodeInfo[] recoveringNodes = recoveryCommand.getRecoveringBlocks()
.toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
assertEquals(3, recoveringNodes.length);
assertEquals(recoveringNodes[0], dd1);
assertEquals(recoveringNodes[1], dd2);
assertEquals(recoveringNodes[2], dd3);
// Test with one stale node.
DFSTestUtil.resetLastUpdatesWithOffset(dd1, 0);
// More than the default stale interval of 30 seconds.
DFSTestUtil.resetLastUpdatesWithOffset(dd2, -40 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
blockInfo = new BlockInfoContiguousUnderConstruction(
new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3,
BlockUCState.UNDER_RECOVERY, storages);
dd1.addBlockToBeRecovered(blockInfo);
cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
assertEquals(1, cmds.length);
assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
recoveryCommand = (BlockRecoveryCommand)cmds[0];
assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
recoveringNodes = recoveryCommand.getRecoveringBlocks()
.toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
assertEquals(2, recoveringNodes.length);
// dd2 is skipped.
assertEquals(recoveringNodes[0], dd1);
assertEquals(recoveringNodes[1], dd3);
          // Test with all stale nodes.
DFSTestUtil.resetLastUpdatesWithOffset(dd1, - 60 * 1000);
// More than the default stale interval of 30 seconds.
DFSTestUtil.resetLastUpdatesWithOffset(dd2, - 40 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd3, - 80 * 1000);
blockInfo = new BlockInfoContiguousUnderConstruction(
new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3,
BlockUCState.UNDER_RECOVERY, storages);
dd1.addBlockToBeRecovered(blockInfo);
cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
assertEquals(1, cmds.length);
assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
recoveryCommand = (BlockRecoveryCommand)cmds[0];
assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
recoveringNodes = recoveryCommand.getRecoveringBlocks()
.toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
          // dd1 just heartbeated, so it is the only replica that is not stale
          // when the recovery command is constructed; with only one non-stale
          // replica left, the NameNode falls back to including all replicas.
assertEquals(3, recoveringNodes.length);
assertEquals(recoveringNodes[0], dd1);
assertEquals(recoveringNodes[1], dd2);
assertEquals(recoveringNodes[2], dd3);
}
} finally {
namesystem.writeUnlock();
}
} finally {
cluster.shutdown();
}
}
}
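
// Arithmetic sketch of what testHeartbeat() asserts: the DN work queues are drained at most
// "limit" items per heartbeat, so a queue of 2*limit + 1 entries empties over three heartbeats
// as limit, limit, then the single remaining entry. Plain JDK code; the int counter below simply
// stands in for the replicate/invalidate queues on the DatanodeDescriptor.
class HeartbeatDrainSketch {
  /** Returns how many items each successive heartbeat would carry until the queue is empty. */
  static java.util.List<Integer> drain(int queued, int perHeartbeatLimit) {
    java.util.List<Integer> perHeartbeat = new java.util.ArrayList<>();
    while (queued > 0) {
      int sent = Math.min(perHeartbeatLimit, queued);
      perHeartbeat.add(sent);
      queued -= sent;
    }
    return perHeartbeat;
  }

  public static void main(String[] args) {
    // With the default limit of 2 and 2*2+1 = 5 queued replications: [2, 2, 1].
    System.out.println(drain(5, 2));
  }
}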
| 11,951 | 47.783673 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import com.google.common.base.Supplier;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
import java.io.BufferedOutputStream;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.UUID;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertEquals;
public class TestNameNodePrunesMissingStorages {
static final Log LOG = LogFactory.getLog(TestNameNodePrunesMissingStorages.class);
private static void runTest(final String testCaseName,
final boolean createFiles,
final int numInitialStorages,
final int expectedStoragesAfterTest) throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster
.Builder(conf)
.numDataNodes(1)
.storagesPerDatanode(numInitialStorages)
.build();
cluster.waitActive();
final DataNode dn0 = cluster.getDataNodes().get(0);
// Ensure NN knows about the storage.
final DatanodeID dnId = dn0.getDatanodeId();
final DatanodeDescriptor dnDescriptor =
cluster.getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dnId);
assertThat(dnDescriptor.getStorageInfos().length, is(numInitialStorages));
final String bpid = cluster.getNamesystem().getBlockPoolId();
final DatanodeRegistration dnReg = dn0.getDNRegistrationForBP(bpid);
DataNodeTestUtils.triggerBlockReport(dn0);
if (createFiles) {
final Path path = new Path("/", testCaseName);
DFSTestUtil.createFile(
cluster.getFileSystem(), path, 1024, (short) 1, 0x1BAD5EED);
DataNodeTestUtils.triggerBlockReport(dn0);
}
// Generate a fake StorageReport that is missing one storage.
final StorageReport reports[] =
dn0.getFSDataset().getStorageReports(bpid);
final StorageReport prunedReports[] = new StorageReport[numInitialStorages - 1];
System.arraycopy(reports, 0, prunedReports, 0, prunedReports.length);
// Stop the DataNode and send fake heartbeat with missing storage.
cluster.stopDataNode(0);
cluster.getNameNodeRpc().sendHeartbeat(dnReg, prunedReports, 0L, 0L, 0, 0,
0, null, true);
// Check that the missing storage was pruned.
assertThat(dnDescriptor.getStorageInfos().length, is(expectedStoragesAfterTest));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test that the NameNode prunes empty storage volumes that are no longer
* reported by the DataNode.
* @throws IOException
*/
@Test (timeout=300000)
public void testUnusedStorageIsPruned() throws IOException {
    // Run the test with 1 storage; after the test, expect 0 storages.
runTest(GenericTestUtils.getMethodName(), false, 1, 0);
}
/**
* Verify that the NameNode does not prune storages with blocks
* simply as a result of a heartbeat being sent missing that storage.
*
* @throws IOException
*/
@Test (timeout=300000)
public void testStorageWithBlocksIsNotPruned() throws IOException {
    // Run the test with 1 storage; after the test, still expect 1 storage.
runTest(GenericTestUtils.getMethodName(), true, 1, 1);
}
/**
* Regression test for HDFS-7960.<p/>
*
* Shutting down a datanode, removing a storage directory, and restarting
* the DataNode should not produce zombie storages.
*/
@Test(timeout=300000)
public void testRemovingStorageDoesNotProduceZombies() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
final int NUM_STORAGES_PER_DN = 2;
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(conf).numDataNodes(3)
.storagesPerDatanode(NUM_STORAGES_PER_DN)
.build();
try {
cluster.waitActive();
for (DataNode dn : cluster.getDataNodes()) {
assertEquals(NUM_STORAGES_PER_DN,
cluster.getNamesystem().getBlockManager().
getDatanodeManager().getDatanode(dn.getDatanodeId()).
getStorageInfos().length);
}
// Create a file which will end up on all 3 datanodes.
final Path TEST_PATH = new Path("/foo1");
DistributedFileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH, 1024, (short) 3, 0xcafecafe);
for (DataNode dn : cluster.getDataNodes()) {
DataNodeTestUtils.triggerBlockReport(dn);
}
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path("/foo1"));
cluster.getNamesystem().writeLock();
final String storageIdToRemove;
String datanodeUuid;
// Find the first storage which this block is in.
try {
Iterator<DatanodeStorageInfo> storageInfoIter =
cluster.getNamesystem().getBlockManager().
getStorages(block.getLocalBlock()).iterator();
assertTrue(storageInfoIter.hasNext());
DatanodeStorageInfo info = storageInfoIter.next();
storageIdToRemove = info.getStorageID();
datanodeUuid = info.getDatanodeDescriptor().getDatanodeUuid();
} finally {
cluster.getNamesystem().writeUnlock();
}
// Find the DataNode which holds that first storage.
final DataNode datanodeToRemoveStorageFrom;
int datanodeToRemoveStorageFromIdx = 0;
while (true) {
if (datanodeToRemoveStorageFromIdx >= cluster.getDataNodes().size()) {
Assert.fail("failed to find datanode with uuid " + datanodeUuid);
datanodeToRemoveStorageFrom = null;
break;
}
DataNode dn = cluster.getDataNodes().
get(datanodeToRemoveStorageFromIdx);
if (dn.getDatanodeUuid().equals(datanodeUuid)) {
datanodeToRemoveStorageFrom = dn;
break;
}
datanodeToRemoveStorageFromIdx++;
}
// Find the volume within the datanode which holds that first storage.
String volumeDirectoryToRemove = null;
try (FsVolumeReferences volumes =
datanodeToRemoveStorageFrom.getFSDataset().getFsVolumeReferences()) {
assertEquals(NUM_STORAGES_PER_DN, volumes.size());
for (FsVolumeSpi volume : volumes) {
if (volume.getStorageID().equals(storageIdToRemove)) {
volumeDirectoryToRemove = volume.getBasePath();
}
}
      }
// Shut down the datanode and remove the volume.
// Replace the volume directory with a regular file, which will
// cause a volume failure. (If we merely removed the directory,
// it would be re-initialized with a new storage ID.)
assertNotNull(volumeDirectoryToRemove);
datanodeToRemoveStorageFrom.shutdown();
FileUtil.fullyDelete(new File(volumeDirectoryToRemove));
FileOutputStream fos = new FileOutputStream(volumeDirectoryToRemove);
try {
fos.write(1);
} finally {
fos.close();
}
cluster.restartDataNode(datanodeToRemoveStorageFromIdx);
// Wait for the NameNode to remove the storage.
LOG.info("waiting for the datanode to remove " + storageIdToRemove);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
final DatanodeDescriptor dnDescriptor =
cluster.getNamesystem().getBlockManager().getDatanodeManager().
getDatanode(datanodeToRemoveStorageFrom.getDatanodeUuid());
assertNotNull(dnDescriptor);
DatanodeStorageInfo[] infos = dnDescriptor.getStorageInfos();
for (DatanodeStorageInfo info : infos) {
if (info.getStorageID().equals(storageIdToRemove)) {
LOG.info("Still found storage " + storageIdToRemove + " on " +
info + ".");
return false;
}
}
assertEquals(NUM_STORAGES_PER_DN - 1, infos.length);
return true;
}
}, 10, 30000);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
private static void rewriteVersionFile(File versionFile,
String newStorageId) throws IOException {
BufferedReader in = new BufferedReader(new FileReader(versionFile));
File newVersionFile =
new File(versionFile.getParent(), UUID.randomUUID().toString());
Writer out = new BufferedWriter(new OutputStreamWriter(
new FileOutputStream(newVersionFile), "UTF-8"));
final String STORAGE_ID = "storageID=";
boolean success = false;
try {
String line;
while ((line = in.readLine()) != null) {
if (line.startsWith(STORAGE_ID)) {
out.write(STORAGE_ID + newStorageId + "\n");
} else {
out.write(line + "\n");
}
}
in.close();
in = null;
out.close();
out = null;
newVersionFile.renameTo(versionFile);
success = true;
} finally {
if (in != null) {
in.close();
}
if (out != null) {
out.close();
}
if (!success) {
versionFile.delete();
}
}
}
@Test(timeout=300000)
public void testRenamingStorageIds() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 0);
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(conf).numDataNodes(1)
.storagesPerDatanode(1)
.build();
GenericTestUtils.setLogLevel(BlockManager.LOG, Level.ALL);
try {
cluster.waitActive();
final Path TEST_PATH = new Path("/foo1");
DistributedFileSystem fs = cluster.getFileSystem();
// Create a file and leave it open
DFSTestUtil.createFile(fs, TEST_PATH, 1, (short)1, 0xdeadbeef);
// Find the volume within the datanode which holds that first storage.
DataNode dn = cluster.getDataNodes().get(0);
FsVolumeReferences volumeRefs =
dn.getFSDataset().getFsVolumeReferences();
final String newStorageId = DatanodeStorage.generateUuid();
try {
File currentDir = new File(volumeRefs.get(0).getBasePath(), "current");
File versionFile = new File(currentDir, "VERSION");
rewriteVersionFile(versionFile, newStorageId);
} finally {
volumeRefs.close();
}
final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
cluster.restartDataNodes();
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
cluster.getNamesystem().writeLock();
try {
Iterator<DatanodeStorageInfo> storageInfoIter =
cluster.getNamesystem().getBlockManager().
getStorages(block.getLocalBlock()).iterator();
if (!storageInfoIter.hasNext()) {
LOG.info("Expected to find a storage for " +
block.getBlockName() + ", but nothing was found. " +
"Continuing to wait.");
return false;
}
DatanodeStorageInfo info = storageInfoIter.next();
if (!newStorageId.equals(info.getStorageID())) {
LOG.info("Expected " + block.getBlockName() + " to " +
"be in storage id " + newStorageId + ", but it " +
"was in " + info.getStorageID() + ". Continuing " +
"to wait.");
return false;
}
LOG.info("Successfully found " + block.getBlockName() + " in " +
"be in storage id " + newStorageId);
} finally {
cluster.getNamesystem().writeUnlock();
}
return true;
}
}, 20, 100000);
} finally {
cluster.shutdown();
}
}
}
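
// rewriteVersionFile() above swaps the storageID line in a DataNode storage VERSION file. The
// sketch below shows the same "copy every line, replacing the one that starts with a given key"
// pattern against an ordinary properties-style file, using only JDK types. Layout details of the
// real VERSION file (other keys, ordering) are not assumed here, and rewriteKey() is an invented
// name for the example.
class KeyLineRewriteSketch {
  static void rewriteKey(java.io.File file, String key, String newValue)
      throws java.io.IOException {
    java.util.List<String> out = new java.util.ArrayList<>();
    // Read every line, swapping the value of the requested key.
    try (java.io.BufferedReader in = new java.io.BufferedReader(
        new java.io.InputStreamReader(new java.io.FileInputStream(file), "UTF-8"))) {
      String line;
      while ((line = in.readLine()) != null) {
        out.add(line.startsWith(key + "=") ? key + "=" + newValue : line);
      }
    }
    // Write the rewritten content back in place.
    try (java.io.Writer w = new java.io.OutputStreamWriter(
        new java.io.FileOutputStream(file), "UTF-8")) {
      for (String line : out) {
        w.write(line + "\n");
      }
    }
  }
}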
| 14,460 | 38.189702 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.VersionInfo;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestReplicationPolicyConsiderLoad {
private static NameNode namenode;
private static DatanodeManager dnManager;
private static List<DatanodeRegistration> dnrList;
private static DatanodeDescriptor[] dataNodes;
private static DatanodeStorageInfo[] storages;
@BeforeClass
public static void setupCluster() throws IOException {
Configuration conf = new HdfsConfiguration();
final String[] racks = {
"/rack1",
"/rack1",
"/rack1",
"/rack2",
"/rack2",
"/rack2"};
storages = DFSTestUtil.createDatanodeStorageInfos(racks);
dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
new File(baseDir, "name").getPath());
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
DFSTestUtil.formatNameNode(conf);
namenode = new NameNode(conf);
int blockSize = 1024;
dnrList = new ArrayList<DatanodeRegistration>();
dnManager = namenode.getNamesystem().getBlockManager().getDatanodeManager();
// Register DNs
for (int i=0; i < 6; i++) {
DatanodeRegistration dnr = new DatanodeRegistration(dataNodes[i],
new StorageInfo(NodeType.DATA_NODE), new ExportedBlockKeys(),
VersionInfo.getVersion());
dnrList.add(dnr);
dnManager.registerDatanode(dnr);
dataNodes[i].getStorageInfos()[0].setUtilizationForTesting(
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L,
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L);
dataNodes[i].updateHeartbeat(
BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]),
0L, 0L, 0, 0, null);
}
}
private final double EPSILON = 0.0001;
/**
* Tests that chooseTarget with considerLoad set to true correctly calculates
* load with decommissioned nodes.
*/
@Test
public void testChooseTargetWithDecomNodes() throws IOException {
namenode.getNamesystem().writeLock();
try {
String blockPoolId = namenode.getNamesystem().getBlockPoolId();
dnManager.handleHeartbeat(dnrList.get(3),
BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),
blockPoolId, dataNodes[3].getCacheCapacity(),
dataNodes[3].getCacheRemaining(),
2, 0, 0, null);
dnManager.handleHeartbeat(dnrList.get(4),
BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[4]),
blockPoolId, dataNodes[4].getCacheCapacity(),
dataNodes[4].getCacheRemaining(),
4, 0, 0, null);
dnManager.handleHeartbeat(dnrList.get(5),
BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[5]),
blockPoolId, dataNodes[5].getCacheCapacity(),
dataNodes[5].getCacheRemaining(),
4, 0, 0, null);
      // total xceiver count reported in the heartbeats above
final int load = 2 + 4 + 4;
FSNamesystem fsn = namenode.getNamesystem();
assertEquals((double)load/6, dnManager.getFSClusterStats()
.getInServiceXceiverAverage(), EPSILON);
// Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
// returns false
for (int i = 0; i < 3; i++) {
DatanodeDescriptor d = dnManager.getDatanode(dnrList.get(i));
dnManager.getDecomManager().startDecommission(d);
d.setDecommissioned();
}
assertEquals((double)load/3, dnManager.getFSClusterStats()
.getInServiceXceiverAverage(), EPSILON);
      // refresh the writer DN reference so it reflects the decommissioned state
List<DatanodeDescriptor> liveNodes = new ArrayList<DatanodeDescriptor>();
dnManager.fetchDatanodes(liveNodes, null, false);
DatanodeDescriptor writerDn = null;
if (liveNodes.contains(dataNodes[0])) {
writerDn = liveNodes.get(liveNodes.indexOf(dataNodes[0]));
}
// Call chooseTarget()
DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager()
.getBlockPlacementPolicy().chooseTarget("testFile.txt", 3,
writerDn, new ArrayList<DatanodeStorageInfo>(), false, null,
1024, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
assertEquals(3, targets.length);
Set<DatanodeStorageInfo> targetSet = new HashSet<DatanodeStorageInfo>(
Arrays.asList(targets));
for (int i = 3; i < storages.length; i++) {
assertTrue(targetSet.contains(storages[i]));
}
} finally {
dataNodes[0].stopDecommission();
dataNodes[1].stopDecommission();
dataNodes[2].stopDecommission();
namenode.getNamesystem().writeUnlock();
}
}
@AfterClass
public static void teardownCluster() {
if (namenode != null) namenode.stop();
}
}
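
// Numeric sketch of the load bookkeeping asserted above. The heartbeats report xceiver counts of
// 2, 4 and 4, so the total is 10: averaged over all 6 in-service nodes that is 10/6, and once the
// three idle nodes are decommissioned it becomes 10/3. The default placement policy (with
// considerLoad enabled) is commonly described as rejecting targets whose xceiver count exceeds
// twice this in-service average; the 2.0 factor below is that assumption, not a quote of the code.
class ConsiderLoadSketch {
  static double inServiceAverage(int totalXceivers, int inServiceNodes) {
    return (double) totalXceivers / inServiceNodes;
  }

  static boolean isOverloaded(int nodeXceivers, double inServiceAverage) {
    return nodeXceivers > 2.0 * inServiceAverage;   // assumed considerLoad cut-off
  }

  public static void main(String[] args) {
    double before = inServiceAverage(2 + 4 + 4, 6);  // ~1.67 while all 6 nodes are in service
    double after = inServiceAverage(2 + 4 + 4, 3);   // ~3.33 after 3 idle nodes are decommissioned
    System.out.printf(
        "node with 4 xceivers: overloaded before decommission? %b, after? %b%n",
        isOverloaded(4, before), isOverloaded(4, after));
  }
}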
| 7,198 | 38.994444 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertTrue;
import java.util.Collection;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.util.Time;
import org.junit.Test;
/**
 * Test that the per-block replica counts (live/excess) are correct,
 * so the NN makes the right decision for under/over-replicated blocks.
*
* Two of the "while" loops below use "busy wait"
* because they are detecting transient states.
*/
public class TestNodeCount {
final short REPLICATION_FACTOR = (short)2;
final long TIMEOUT = 20000L;
long timeout = 0;
long failtime = 0;
Block lastBlock = null;
NumberReplicas lastNum = null;
@Test
public void testNodeCount() throws Exception {
// start a mini dfs cluster of 2 nodes
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR).build();
try {
final FSNamesystem namesystem = cluster.getNamesystem();
final BlockManager bm = namesystem.getBlockManager();
final HeartbeatManager hm = bm.getDatanodeManager().getHeartbeatManager();
final FileSystem fs = cluster.getFileSystem();
// populate the cluster with a one block file
final Path FILE_PATH = new Path("/testfile");
DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
// keep a copy of all datanode descriptor
final DatanodeDescriptor[] datanodes = hm.getDatanodes();
// start two new nodes
cluster.startDataNodes(conf, 2, true, null, null);
cluster.waitActive();
// bring down first datanode
DatanodeDescriptor datanode = datanodes[0];
DataNodeProperties dnprop = cluster.stopDataNode(datanode.getXferAddr());
// make sure that NN detects that the datanode is down
BlockManagerTestUtil.noticeDeadDatanode(
cluster.getNameNode(), datanode.getXferAddr());
// the block will be replicated
DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
// restart the first datanode
cluster.restartDataNode(dnprop);
cluster.waitActive();
// check if excessive replica is detected (transient)
initializeTimeout(TIMEOUT);
while (countNodes(block.getLocalBlock(), namesystem).excessReplicas() == 0) {
checkTimeout("excess replicas not detected");
}
// find out a non-excess node
DatanodeDescriptor nonExcessDN = null;
for(DatanodeStorageInfo storage : bm.blocksMap.getStorages(block.getLocalBlock())) {
final DatanodeDescriptor dn = storage.getDatanodeDescriptor();
Collection<Block> blocks = bm.excessReplicateMap.get(dn.getDatanodeUuid());
if (blocks == null || !blocks.contains(block.getLocalBlock()) ) {
nonExcessDN = dn;
break;
}
}
assertTrue(nonExcessDN!=null);
// bring down non excessive datanode
dnprop = cluster.stopDataNode(nonExcessDN.getXferAddr());
// make sure that NN detects that the datanode is down
BlockManagerTestUtil.noticeDeadDatanode(
cluster.getNameNode(), nonExcessDN.getXferAddr());
// The block should be replicated
initializeTimeout(TIMEOUT);
while (countNodes(block.getLocalBlock(), namesystem).liveReplicas() != REPLICATION_FACTOR) {
checkTimeout("live replica count not correct", 1000);
}
// restart the first datanode
cluster.restartDataNode(dnprop);
cluster.waitActive();
// check if excessive replica is detected (transient)
initializeTimeout(TIMEOUT);
while (countNodes(block.getLocalBlock(), namesystem).excessReplicas() != 2) {
checkTimeout("excess replica count not equal to 2");
}
} finally {
cluster.shutdown();
}
}
void initializeTimeout(long timeout) {
this.timeout = timeout;
this.failtime = Time.monotonicNow()
+ ((timeout <= 0) ? Long.MAX_VALUE : timeout);
}
/* busy wait on transient conditions */
void checkTimeout(String testLabel) throws TimeoutException {
checkTimeout(testLabel, 0);
}
/* check for timeout, then wait for cycleTime msec */
void checkTimeout(String testLabel, long cycleTime) throws TimeoutException {
if (Time.monotonicNow() > failtime) {
throw new TimeoutException("Timeout: "
+ testLabel + " for block " + lastBlock + " after " + timeout
+ " msec. Last counts: live = " + lastNum.liveReplicas()
+ ", excess = " + lastNum.excessReplicas()
+ ", corrupt = " + lastNum.corruptReplicas());
}
if (cycleTime > 0) {
try {
Thread.sleep(cycleTime);
} catch (InterruptedException ie) {
//ignore
}
}
}
/* threadsafe read of the replication counts for this block */
NumberReplicas countNodes(Block block, FSNamesystem namesystem) {
BlockManager blockManager = namesystem.getBlockManager();
namesystem.readLock();
try {
lastBlock = block;
lastNum = blockManager.countNodes(blockManager.getStoredBlock(block));
return lastNum;
}
finally {
namesystem.readUnlock();
}
}
}
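
// The initializeTimeout()/checkTimeout() pair above implements a deadline-bounded busy wait for
// transient cluster states. The sketch below captures the same pattern as a single reusable
// helper with an explicit condition callback; the Condition interface and poll() method are
// invented for the example (the test itself keeps the loops inline, and GenericTestUtils.waitFor
// offers a similar utility elsewhere in the test tree).
class BusyWaitSketch {
  interface Condition {
    boolean met();
  }

  /** Polls until the condition holds, sleeping between checks; throws if the deadline passes. */
  static void poll(Condition condition, long timeoutMillis, long sleepMillis)
      throws java.util.concurrent.TimeoutException, InterruptedException {
    long deadline = System.nanoTime() + timeoutMillis * 1_000_000L;
    while (!condition.met()) {
      if (System.nanoTime() > deadline) {
        throw new java.util.concurrent.TimeoutException(
            "condition not met within " + timeoutMillis + " ms");
      }
      Thread.sleep(sleepMillis);
    }
  }
}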
| 6,671 | 35.861878 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.text.SimpleDateFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
/**
* Test if we can correctly delay the deletion of blocks.
*/
public class TestPendingInvalidateBlock {
{
GenericTestUtils.setLogLevel(BlockManager.LOG, Level.DEBUG);
}
private static final int BLOCKSIZE = 1024;
private static final short REPLICATION = 2;
private Configuration conf;
private MiniDFSCluster cluster;
private DistributedFileSystem dfs;
@Before
public void setUp() throws Exception {
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
// block deletion pending period
conf.setLong(DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY, 5L);
// set the block report interval to 2s
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 2000);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
// disable the RPC timeout for debug
conf.setLong(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, 0);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
dfs = cluster.getFileSystem();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testPendingDeletion() throws Exception {
final Path foo = new Path("/foo");
DFSTestUtil.createFile(dfs, foo, BLOCKSIZE, REPLICATION, 0);
// restart NN
cluster.restartNameNode(true);
dfs.delete(foo, true);
Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
Assert.assertEquals(REPLICATION, cluster.getNamesystem()
.getPendingDeletionBlocks());
Thread.sleep(6000);
Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
String nnStartedStr = cluster.getNamesystem().getNNStarted();
long nnStarted = new SimpleDateFormat("EEE MMM dd HH:mm:ss zzz yyyy")
.parse(nnStartedStr).getTime();
long blockDeletionStartTime = cluster.getNamesystem()
.getBlockDeletionStartTime();
Assert.assertTrue(String.format(
"Expect blockDeletionStartTime = %d > nnStarted = %d/nnStartedStr = %s.",
blockDeletionStartTime, nnStarted, nnStartedStr),
blockDeletionStartTime > nnStarted);
}
/**
* Test whether we can delay the deletion of unknown blocks in DataNode's
* first several block reports.
*/
@Test
public void testPendingDeleteUnknownBlocks() throws Exception {
final int fileNum = 5; // 5 files
final Path[] files = new Path[fileNum];
final DataNodeProperties[] dnprops = new DataNodeProperties[REPLICATION];
// create a group of files, each file contains 1 block
for (int i = 0; i < fileNum; i++) {
files[i] = new Path("/file" + i);
DFSTestUtil.createFile(dfs, files[i], BLOCKSIZE, REPLICATION, i);
}
// wait until all DataNodes have replicas
waitForReplication();
for (int i = REPLICATION - 1; i >= 0; i--) {
dnprops[i] = cluster.stopDataNode(i);
}
Thread.sleep(2000);
// delete 2 files, we still have 3 files remaining so that we can cover
// every DN storage
for (int i = 0; i < 2; i++) {
dfs.delete(files[i], true);
}
// restart NameNode
cluster.restartNameNode(false);
InvalidateBlocks invalidateBlocks = (InvalidateBlocks) Whitebox
.getInternalState(cluster.getNamesystem().getBlockManager(),
"invalidateBlocks");
InvalidateBlocks mockIb = Mockito.spy(invalidateBlocks);
Mockito.doReturn(1L).when(mockIb).getInvalidationDelay();
Whitebox.setInternalState(cluster.getNamesystem().getBlockManager(),
"invalidateBlocks", mockIb);
Assert.assertEquals(0L, cluster.getNamesystem().getPendingDeletionBlocks());
// restart DataNodes
for (int i = 0; i < REPLICATION; i++) {
cluster.restartDataNode(dnprops[i], true);
}
cluster.waitActive();
for (int i = 0; i < REPLICATION; i++) {
DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(i));
}
Thread.sleep(2000);
// make sure we have received block reports by checking the total block #
Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
cluster.restartNameNode(true);
Thread.sleep(6000);
Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
}
private long waitForReplication() throws Exception {
for (int i = 0; i < 10; i++) {
long ur = cluster.getNamesystem().getUnderReplicatedBlocks();
if (ur == 0) {
return 0;
} else {
Thread.sleep(1000);
}
}
return cluster.getNamesystem().getUnderReplicatedBlocks();
}
}
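
// The 5 second value set for DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY above is why the
// test sleeps 6000 ms before expecting pendingDeletionBlocks to drop to 0: block invalidation is
// held back until the configured delay after NameNode startup has elapsed. The helper below is a
// minimal restatement of that gate using plain timestamps; method and parameter names are
// illustrative, not the InvalidateBlocks API.
class StartupDeletionDelaySketch {
  /** True once enough time has passed since NN startup for block deletion to proceed. */
  static boolean deletionAllowed(long nnStartedMillis, long nowMillis, long delaySeconds) {
    return nowMillis - nnStartedMillis >= delaySeconds * 1000L;
  }

  public static void main(String[] args) {
    long started = 0L;
    System.out.println(deletionAllowed(started, 4_000L, 5));  // false: still inside the delay
    System.out.println(deletionAllowed(started, 6_000L, 5));  // true: mirrors the 6 s sleep above
  }
}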
| 6,536 | 36.568966 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.junit.Test;
import org.mockito.Mockito;
/**
* This class tests the internals of PendingReplicationBlocks.java,
 * as well as how PendingReplicationBlocks behaves within BlockManager.
*/
public class TestPendingReplication {
final static int TIMEOUT = 3; // 3 seconds
private static final int DFS_REPLICATION_INTERVAL = 1;
// Number of datanodes in the cluster
private static final int DATANODE_COUNT = 5;
private BlockInfo genBlockInfo(long id, long length, long gs) {
return new BlockInfoContiguous(new Block(id, length, gs),
(short) DATANODE_COUNT);
}
@Test
public void testPendingReplication() {
PendingReplicationBlocks pendingReplications;
pendingReplications = new PendingReplicationBlocks(TIMEOUT * 1000);
pendingReplications.start();
//
// Add 10 blocks to pendingReplications.
//
DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(10);
for (int i = 0; i < storages.length; i++) {
BlockInfo block = genBlockInfo(i, i, 0);
DatanodeStorageInfo[] targets = new DatanodeStorageInfo[i];
System.arraycopy(storages, 0, targets, 0, i);
pendingReplications.increment(block,
DatanodeStorageInfo.toDatanodeDescriptors(targets));
}
assertEquals("Size of pendingReplications ",
10, pendingReplications.size());
//
// remove one item and reinsert it
//
BlockInfo blk = genBlockInfo(8, 8, 0);
pendingReplications.decrement(blk, storages[7].getDatanodeDescriptor()); // removes one replica
assertEquals("pendingReplications.getNumReplicas ",
7, pendingReplications.getNumReplicas(blk));
for (int i = 0; i < 7; i++) {
// removes all replicas
pendingReplications.decrement(blk, storages[i].getDatanodeDescriptor());
}
assertTrue(pendingReplications.size() == 9);
pendingReplications.increment(blk,
DatanodeStorageInfo.toDatanodeDescriptors(
DFSTestUtil.createDatanodeStorageInfos(8)));
assertTrue(pendingReplications.size() == 10);
//
// verify that the number of replicas returned
// are sane.
//
for (int i = 0; i < 10; i++) {
BlockInfo block = genBlockInfo(i, i, 0);
int numReplicas = pendingReplications.getNumReplicas(block);
assertTrue(numReplicas == i);
}
//
// verify that nothing has timed out so far
//
assertTrue(pendingReplications.getTimedOutBlocks() == null);
//
// Wait for one second and then insert some more items.
//
try {
Thread.sleep(1000);
} catch (Exception e) {
}
for (int i = 10; i < 15; i++) {
BlockInfo block = genBlockInfo(i, i, 0);
pendingReplications.increment(block,
DatanodeStorageInfo.toDatanodeDescriptors(
DFSTestUtil.createDatanodeStorageInfos(i)));
}
assertTrue(pendingReplications.size() == 15);
//
// Wait for everything to timeout.
//
int loop = 0;
while (pendingReplications.size() > 0) {
try {
Thread.sleep(1000);
} catch (Exception e) {
}
loop++;
}
System.out.println("Had to wait for " + loop +
" seconds for the lot to timeout");
//
// Verify that everything has timed out.
//
assertEquals("Size of pendingReplications ", 0, pendingReplications.size());
Block[] timedOut = pendingReplications.getTimedOutBlocks();
assertTrue(timedOut != null && timedOut.length == 15);
for (int i = 0; i < timedOut.length; i++) {
assertTrue(timedOut[i].getBlockId() < 15);
}
pendingReplications.stop();
}
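
  // The assertions above rely on PendingReplicationBlocks timing entries out after TIMEOUT
  // seconds and handing them back via getTimedOutBlocks(). The nested sketch below restates that
  // bookkeeping with a plain map keyed by block id; it is illustrative only and does not mirror
  // the real class's internal structures or its timer thread.
  private static final class PendingTimeoutSketch {
    private final java.util.Map<Long, Long> insertTimeMillis = new java.util.HashMap<>();
    private final long timeoutMillis;

    PendingTimeoutSketch(long timeoutMillis) {
      this.timeoutMillis = timeoutMillis;
    }

    void add(long blockId, long nowMillis) {
      insertTimeMillis.put(blockId, nowMillis);
    }

    /** Removes and returns the ids of all entries older than the timeout. */
    java.util.List<Long> takeTimedOut(long nowMillis) {
      java.util.List<Long> timedOut = new java.util.ArrayList<>();
      java.util.Iterator<java.util.Map.Entry<Long, Long>> it =
          insertTimeMillis.entrySet().iterator();
      while (it.hasNext()) {
        java.util.Map.Entry<Long, Long> e = it.next();
        if (nowMillis - e.getValue() >= timeoutMillis) {
          timedOut.add(e.getKey());
          it.remove();
        }
      }
      return timedOut;
    }
  }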
/* Test that processPendingReplications will use the most recent
   * BlockInfo from the blocksMap by placing a larger generation stamp into
   * the blocksMap.
*/
@Test
public void testProcessPendingReplications() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setLong(
DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, TIMEOUT);
MiniDFSCluster cluster = null;
Block block;
BlockInfo blockInfo;
try {
cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_COUNT).build();
cluster.waitActive();
FSNamesystem fsn = cluster.getNamesystem();
BlockManager blkManager = fsn.getBlockManager();
PendingReplicationBlocks pendingReplications =
blkManager.pendingReplications;
UnderReplicatedBlocks neededReplications = blkManager.neededReplications;
BlocksMap blocksMap = blkManager.blocksMap;
//
// Add 1 block to pendingReplications with GenerationStamp = 0.
//
block = new Block(1, 1, 0);
blockInfo = new BlockInfoContiguous(block, (short) 3);
pendingReplications.increment(blockInfo,
DatanodeStorageInfo.toDatanodeDescriptors(
DFSTestUtil.createDatanodeStorageInfos(1)));
BlockCollection bc = Mockito.mock(BlockCollection.class);
Mockito.doReturn((short) 3).when(bc).getPreferredBlockReplication();
// Place into blocksmap with GenerationStamp = 1
blockInfo.setGenerationStamp(1);
blocksMap.addBlockCollection(blockInfo, bc);
assertEquals("Size of pendingReplications ", 1,
pendingReplications.size());
// Add a second block to pendingReplications that has no
// corresponding entry in blocksmap
block = new Block(2, 2, 0);
blockInfo = new BlockInfoContiguous(block, (short) 3);
pendingReplications.increment(blockInfo,
DatanodeStorageInfo.toDatanodeDescriptors(
DFSTestUtil.createDatanodeStorageInfos(1)));
// verify 2 blocks in pendingReplications
assertEquals("Size of pendingReplications ", 2,
pendingReplications.size());
//
// Wait for everything to timeout.
//
while (pendingReplications.size() > 0) {
try {
Thread.sleep(100);
} catch (Exception e) {
}
}
//
// Verify that block moves to neededReplications
//
while (neededReplications.size() == 0) {
try {
Thread.sleep(100);
} catch (Exception e) {
}
}
      // Verify that the block queued for replication now carries
      // generation stamp 1
for (Block b: neededReplications) {
assertEquals("Generation stamp is 1 ", 1,
b.getGenerationStamp());
}
// Verify size of neededReplications is exactly 1.
assertEquals("size of neededReplications is 1 ", 1,
neededReplications.size());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
 * Test that DatanodeProtocol#blockReceivedAndDeleted correctly updates the
 * pending replications. Also make sure the blockReceivedAndDeleted call is
 * idempotent with respect to the pending replication count.
*/
@Test
public void testBlockReceived() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
DATANODE_COUNT).build();
cluster.waitActive();
DistributedFileSystem hdfs = cluster.getFileSystem();
FSNamesystem fsn = cluster.getNamesystem();
BlockManager blkManager = fsn.getBlockManager();
final String file = "/tmp.txt";
final Path filePath = new Path(file);
short replFactor = 1;
DFSTestUtil.createFile(hdfs, filePath, 1024L, replFactor, 0);
// temporarily stop the heartbeat
ArrayList<DataNode> datanodes = cluster.getDataNodes();
for (int i = 0; i < DATANODE_COUNT; i++) {
DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i), true);
}
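      // Raise the replication factor from 1 to DATANODE_COUNT; the NN should
      // schedule DATANODE_COUNT - 1 = 4 pending replications for the single block.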
hdfs.setReplication(filePath, (short) DATANODE_COUNT);
BlockManagerTestUtil.computeAllPendingWork(blkManager);
assertEquals(1, blkManager.pendingReplications.size());
INodeFile fileNode = fsn.getFSDirectory().getINode4Write(file).asFile();
BlockInfo[] blocks = fileNode.getBlocks();
assertEquals(DATANODE_COUNT - 1,
blkManager.pendingReplications.getNumReplicas(blocks[0]));
LocatedBlock locatedBlock = hdfs.getClient().getLocatedBlocks(file, 0)
.get(0);
DatanodeInfo existingDn = (locatedBlock.getLocations())[0];
int reportDnNum = 0;
String poolId = cluster.getNamesystem().getBlockPoolId();
      // let two datanodes (other than the one that already has the data)
      // report to the NN
for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(
poolId);
StorageReceivedDeletedBlocks[] report = {
new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(
blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
reportDnNum++;
}
}
assertEquals(DATANODE_COUNT - 3,
blkManager.pendingReplications.getNumReplicas(blocks[0]));
// let the same datanodes report again
      for (int i = 0; i < DATANODE_COUNT && reportDnNum < 4; i++) {
if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(
poolId);
StorageReceivedDeletedBlocks[] report =
{ new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(
blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
reportDnNum++;
}
}
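      // The pending count must stay at 2: repeated RECEIVED_BLOCK reports from
      // the same datanodes are idempotent.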
assertEquals(DATANODE_COUNT - 3,
blkManager.pendingReplications.getNumReplicas(blocks[0]));
// re-enable heartbeat for the datanode that has data
for (int i = 0; i < DATANODE_COUNT; i++) {
DataNodeTestUtils
.setHeartbeatsDisabledForTests(datanodes.get(i), false);
DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
}
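      // With heartbeats restored, the datanodes perform the scheduled
      // replications and acknowledge them, draining the pending queue.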
Thread.sleep(5000);
assertEquals(0, blkManager.pendingReplications.size());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test if BlockManager can correctly remove corresponding pending records
* when a file is deleted
*
* @throws Exception
*/
@Test
public void testPendingAndInvalidate() throws Exception {
final Configuration CONF = new HdfsConfiguration();
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
DFS_REPLICATION_INTERVAL);
CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
DFS_REPLICATION_INTERVAL);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
DATANODE_COUNT).build();
cluster.waitActive();
FSNamesystem namesystem = cluster.getNamesystem();
BlockManager bm = namesystem.getBlockManager();
DistributedFileSystem fs = cluster.getFileSystem();
try {
// 1. create a file
Path filePath = new Path("/tmp.txt");
DFSTestUtil.createFile(fs, filePath, 1024, (short) 3, 0L);
// 2. disable the heartbeats
for (DataNode dn : cluster.getDataNodes()) {
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
}
// 3. mark a couple of blocks as corrupt
LocatedBlock block = NameNodeAdapter.getBlockLocations(
cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
cluster.getNamesystem().writeLock();
try {
bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
"STORAGE_ID", "TEST");
bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[1],
"STORAGE_ID", "TEST");
} finally {
cluster.getNamesystem().writeUnlock();
}
BlockManagerTestUtil.computeAllPendingWork(bm);
BlockManagerTestUtil.updateState(bm);
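      // One block should now be pending, with two replicas scheduled to
      // replace the two corrupt ones.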
assertEquals(bm.getPendingReplicationBlocksCount(), 1L);
BlockInfo storedBlock = bm.getStoredBlock(block.getBlock().getLocalBlock());
assertEquals(bm.pendingReplications.getNumReplicas(storedBlock), 2);
// 4. delete the file
fs.delete(filePath, true);
// retry at most 10 times, each time sleep for 1s. Note that 10s is much
// less than the default pending record timeout (5~10min)
int retries = 10;
long pendingNum = bm.getPendingReplicationBlocksCount();
while (pendingNum != 0 && retries-- > 0) {
Thread.sleep(1000); // let NN do the deletion
BlockManagerTestUtil.updateState(bm);
pendingNum = bm.getPendingReplicationBlocksCount();
}
assertEquals(pendingNum, 0L);
} finally {
cluster.shutdown();
}
}
}
| 15,243 | 36.090024 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.util.Shell;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.*;
public class TestDatanodeManager {
public static final Log LOG = LogFactory.getLog(TestDatanodeManager.class);
//The number of times the registration / removal of nodes should happen
final int NUM_ITERATIONS = 500;
private static DatanodeManager mockDatanodeManager(
FSNamesystem fsn, Configuration conf) throws IOException {
BlockManager bm = Mockito.mock(BlockManager.class);
BlockReportLeaseManager blm = new BlockReportLeaseManager(conf);
Mockito.when(bm.getBlockReportLeaseManager()).thenReturn(blm);
DatanodeManager dm = new DatanodeManager(bm, fsn, conf);
return dm;
}
/**
* This test sends a random sequence of node registrations and node removals
* to the DatanodeManager (of nodes with different IDs and versions), and
* checks that the DatanodeManager keeps a correct count of different software
* versions at all times.
*/
@Test
public void testNumVersionsReportedCorrect() throws IOException {
//Create the DatanodeManager which will be tested
FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
Mockito.when(fsn.hasWriteLock()).thenReturn(true);
DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
    //Pick a random seed and log it so test failures are easier to reproduce
Random rng = new Random();
int seed = rng.nextInt();
rng = new Random(seed);
LOG.info("Using seed " + seed + " for testing");
//A map of the Storage IDs to the DN registration it was registered with
HashMap <String, DatanodeRegistration> sIdToDnReg =
new HashMap<String, DatanodeRegistration>();
for(int i=0; i<NUM_ITERATIONS; ++i) {
      //On every third iteration, with 50% probability, remove a random node (if any)
if(rng.nextBoolean() && i%3 == 0 && sIdToDnReg.size()!=0) {
//Pick a random node.
int randomIndex = rng.nextInt() % sIdToDnReg.size();
//Iterate to that random position
Iterator<Map.Entry<String, DatanodeRegistration>> it =
sIdToDnReg.entrySet().iterator();
for(int j=0; j<randomIndex-1; ++j) {
it.next();
}
DatanodeRegistration toRemove = it.next().getValue();
LOG.info("Removing node " + toRemove.getDatanodeUuid() + " ip " +
toRemove.getXferAddr() + " version : " + toRemove.getSoftwareVersion());
//Remove that random node
dm.removeDatanode(toRemove);
it.remove();
}
      // Otherwise register a node. This may be a new node or one registered before
else {
//Pick a random storageID to register.
String storageID = "someStorageID" + rng.nextInt(5000);
DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
Mockito.when(dr.getDatanodeUuid()).thenReturn(storageID);
//If this storageID had already been registered before
if(sIdToDnReg.containsKey(storageID)) {
dr = sIdToDnReg.get(storageID);
        //Half of the time, change the IP address
if(rng.nextBoolean()) {
dr.setIpAddr(dr.getIpAddr() + "newIP");
}
} else { //This storageID has never been registered
//Ensure IP address is unique to storageID
String ip = "someIP" + storageID;
Mockito.when(dr.getIpAddr()).thenReturn(ip);
Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
Mockito.when(dr.getXferPort()).thenReturn(9000);
}
//Pick a random version to register with
Mockito.when(dr.getSoftwareVersion()).thenReturn(
"version" + rng.nextInt(5));
LOG.info("Registering node storageID: " + dr.getDatanodeUuid() +
", version: " + dr.getSoftwareVersion() + ", IP address: "
+ dr.getXferAddr());
//Register this random node
dm.registerDatanode(dr);
sIdToDnReg.put(storageID, dr);
}
//Verify DatanodeManager still has the right count
Map<String, Integer> mapToCheck = dm.getDatanodesSoftwareVersions();
      //Decrement the count for each registered node's version; once every node
      //has been accounted for, mapToCheck should be empty
for(Entry<String, DatanodeRegistration> it: sIdToDnReg.entrySet()) {
String ver = it.getValue().getSoftwareVersion();
if(!mapToCheck.containsKey(ver)) {
throw new AssertionError("The correct number of datanodes of a "
+ "version was not found on iteration " + i);
}
mapToCheck.put(ver, mapToCheck.get(ver) - 1);
if(mapToCheck.get(ver) == 0) {
mapToCheck.remove(ver);
}
}
for(Entry <String, Integer> entry: mapToCheck.entrySet()) {
LOG.info("Still in map: " + entry.getKey() + " has "
+ entry.getValue());
}
assertEquals("The map of version counts returned by DatanodeManager was"
+ " not what it was expected to be on iteration " + i, 0,
mapToCheck.size());
}
}
@Test (timeout = 100000)
public void testRejectUnresolvedDatanodes() throws IOException {
//Create the DatanodeManager which will be tested
FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
Mockito.when(fsn.hasWriteLock()).thenReturn(true);
Configuration conf = new Configuration();
//Set configuration property for rejecting unresolved topology mapping
conf.setBoolean(
DFSConfigKeys.DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_KEY, true);
//set TestDatanodeManager.MyResolver to be used for topology resolving
conf.setClass(
CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
TestDatanodeManager.MyResolver.class, DNSToSwitchMapping.class);
//create DatanodeManager
DatanodeManager dm = mockDatanodeManager(fsn, conf);
//storageID to register.
String storageID = "someStorageID-123";
DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
Mockito.when(dr.getDatanodeUuid()).thenReturn(storageID);
try {
//Register this node
dm.registerDatanode(dr);
Assert.fail("Expected an UnresolvedTopologyException");
} catch (UnresolvedTopologyException ute) {
LOG.info("Expected - topology is not resolved and " +
"registration is rejected.");
} catch (Exception e) {
Assert.fail("Expected an UnresolvedTopologyException");
}
}
/**
   * MyResolver provides a resolve method that always returns null
   * in order to simulate an unresolved topology mapping.
*/
public static class MyResolver implements DNSToSwitchMapping {
@Override
public List<String> resolve(List<String> names) {
return null;
}
@Override
public void reloadCachedMappings() {
}
@Override
public void reloadCachedMappings(List<String> names) {
}
}
/**
* This test creates a LocatedBlock with 5 locations, sorts the locations
* based on the network topology, and ensures the locations are still aligned
* with the storage ids and storage types.
*/
@Test
public void testSortLocatedBlocks() throws IOException, URISyntaxException {
HelperFunction(null);
}
/**
   * Execute a functional topology script and make sure that the helper
   * function works correctly
*
* @throws IOException
* @throws URISyntaxException
*/
@Test
public void testgoodScript() throws IOException, URISyntaxException {
HelperFunction("/" + Shell.appendScriptExtension("topology-script"));
}
/**
   * Run a broken script and verify that the helper function is able to
   * ignore the broken script and work correctly
*
* @throws IOException
* @throws URISyntaxException
*/
@Test
public void testBadScript() throws IOException, URISyntaxException {
HelperFunction("/"+ Shell.appendScriptExtension("topology-broken-script"));
}
/**
   * Helper function that tests the DatanodeManager's sortLocatedBlocks function;
   * we invoke it both with and without a topology script
*
* @param scriptFileName - Script Name or null
*
* @throws URISyntaxException
* @throws IOException
*/
public void HelperFunction(String scriptFileName)
throws URISyntaxException, IOException {
// create the DatanodeManager which will be tested
Configuration conf = new Configuration();
FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
Mockito.when(fsn.hasWriteLock()).thenReturn(true);
if (scriptFileName != null && !scriptFileName.isEmpty()) {
URL shellScript = getClass().getResource(scriptFileName);
Path resourcePath = Paths.get(shellScript.toURI());
FileUtil.setExecutable(resourcePath.toFile(), true);
conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
resourcePath.toString());
}
DatanodeManager dm = mockDatanodeManager(fsn, conf);
// register 5 datanodes, each with different storage ID and type
DatanodeInfo[] locs = new DatanodeInfo[5];
String[] storageIDs = new String[5];
StorageType[] storageTypes = new StorageType[]{
StorageType.ARCHIVE,
StorageType.DEFAULT,
StorageType.DISK,
StorageType.RAM_DISK,
StorageType.SSD
};
for (int i = 0; i < 5; i++) {
// register new datanode
String uuid = "UUID-" + i;
String ip = "IP-" + i;
DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid);
Mockito.when(dr.getIpAddr()).thenReturn(ip);
Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
Mockito.when(dr.getXferPort()).thenReturn(9000);
Mockito.when(dr.getSoftwareVersion()).thenReturn("version1");
dm.registerDatanode(dr);
// get location and storage information
locs[i] = dm.getDatanode(uuid);
storageIDs[i] = "storageID-" + i;
}
    // set first 2 locations as decommissioned
locs[0].setDecommissioned();
locs[1].setDecommissioned();
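    // after sorting, these two decommissioned nodes are expected to end up at
    // the end of the location list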
// create LocatedBlock with above locations
ExtendedBlock b = new ExtendedBlock("somePoolID", 1234);
LocatedBlock block = new LocatedBlock(b, locs, storageIDs, storageTypes);
List<LocatedBlock> blocks = new ArrayList<>();
blocks.add(block);
final String targetIp = locs[4].getIpAddr();
// sort block locations
dm.sortLocatedBlocks(targetIp, blocks);
// check that storage IDs/types are aligned with datanode locs
DatanodeInfo[] sortedLocs = block.getLocations();
storageIDs = block.getStorageIDs();
storageTypes = block.getStorageTypes();
assertThat(sortedLocs.length, is(5));
assertThat(storageIDs.length, is(5));
assertThat(storageTypes.length, is(5));
for (int i = 0; i < sortedLocs.length; i++) {
assertThat(((DatanodeInfoWithStorage) sortedLocs[i]).getStorageID(),
is(storageIDs[i]));
assertThat(((DatanodeInfoWithStorage) sortedLocs[i]).getStorageType(),
is(storageTypes[i]));
}
// Ensure the local node is first.
assertThat(sortedLocs[0].getIpAddr(), is(targetIp));
// Ensure the two decommissioned DNs were moved to the end.
assertThat(sortedLocs[sortedLocs.length - 1].getAdminState(),
is(DatanodeInfo.AdminStates.DECOMMISSIONED));
assertThat(sortedLocs[sortedLocs.length - 2].getAdminState(),
is(DatanodeInfo.AdminStates.DECOMMISSIONED));
}
}
| 13,382 | 36.487395 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Map.Entry;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetworkTopology;
import org.junit.Assert;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.LinkedListMultimap;
import com.google.common.collect.Lists;
public class TestBlockManager {
private DatanodeStorageInfo[] storages;
private List<DatanodeDescriptor> nodes;
private List<DatanodeDescriptor> rackA;
private List<DatanodeDescriptor> rackB;
/**
* Some of these tests exercise code which has some randomness involved -
   * i.e. even if there's a bug, they may pass because the random node selection
* chooses the correct result.
*
* Since they're true unit tests and run quickly, we loop them a number
* of times trying to trigger the incorrect behavior.
*/
private static final int NUM_TEST_ITERS = 30;
private static final int BLOCK_SIZE = 64*1024;
private FSNamesystem fsn;
private BlockManager bm;
@Before
public void setupMockCluster() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
"need to set a dummy value here so it assumes a multi-rack cluster");
fsn = Mockito.mock(FSNamesystem.class);
Mockito.doReturn(true).when(fsn).hasWriteLock();
bm = new BlockManager(fsn, conf);
final String[] racks = {
"/rackA",
"/rackA",
"/rackA",
"/rackB",
"/rackB",
"/rackB"};
storages = DFSTestUtil.createDatanodeStorageInfos(racks);
nodes = Arrays.asList(DFSTestUtil.toDatanodeDescriptor(storages));
rackA = nodes.subList(0, 3);
rackB = nodes.subList(3, 6);
}
private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) {
NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
// construct network topology
for (DatanodeDescriptor dn : nodesToAdd) {
cluster.add(dn);
dn.getStorageInfos()[0].setUtilizationForTesting(
2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L);
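      // advertise enough free space that every node is a valid replication target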
dn.updateHeartbeat(
BlockManagerTestUtil.getStorageReportsForDatanode(dn), 0L, 0L, 0, 0,
null);
bm.getDatanodeManager().checkIfClusterIsNowMultiRack(dn);
}
}
private void removeNode(DatanodeDescriptor deadNode) {
NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
cluster.remove(deadNode);
bm.removeBlocksAssociatedTo(deadNode);
}
/**
* Test that replication of under-replicated blocks is detected
* and basically works
*/
@Test
public void testBasicReplication() throws Exception {
addNodes(nodes);
for (int i = 0; i < NUM_TEST_ITERS; i++) {
doBasicTest(i);
}
}
private void doBasicTest(int testIndex) {
List<DatanodeStorageInfo> origStorages = getStorages(0, 1);
List<DatanodeDescriptor> origNodes = getNodes(origStorages);
BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
DatanodeStorageInfo[] pipeline = scheduleSingleReplication(blockInfo);
assertEquals(2, pipeline.length);
assertTrue("Source of replication should be one of the nodes the block " +
"was on. Was: " + pipeline[0],
origStorages.contains(pipeline[0]));
assertTrue("Destination of replication should be on the other rack. " +
"Was: " + pipeline[1],
rackB.contains(pipeline[1].getDatanodeDescriptor()));
}
/**
* Regression test for HDFS-1480
* - Cluster has 2 racks, A and B, each with three nodes.
* - Block initially written on A1, A2, B1
* - Admin decommissions two of these nodes (let's say A1 and A2 but it doesn't matter)
* - Re-replication should respect rack policy
*/
@Test
public void testTwoOfThreeNodesDecommissioned() throws Exception {
addNodes(nodes);
for (int i = 0; i < NUM_TEST_ITERS; i++) {
doTestTwoOfThreeNodesDecommissioned(i);
}
}
private void doTestTwoOfThreeNodesDecommissioned(int testIndex) throws Exception {
// Block originally on A1, A2, B1
List<DatanodeStorageInfo> origStorages = getStorages(0, 1, 3);
List<DatanodeDescriptor> origNodes = getNodes(origStorages);
BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
// Decommission two of the nodes (A1, A2)
List<DatanodeDescriptor> decomNodes = startDecommission(0, 1);
DatanodeStorageInfo[] pipeline = scheduleSingleReplication(blockInfo);
assertTrue("Source of replication should be one of the nodes the block " +
"was on. Was: " + pipeline[0],
origStorages.contains(pipeline[0]));
assertEquals("Should have three targets", 3, pipeline.length);
boolean foundOneOnRackA = false;
for (int i = 1; i < pipeline.length; i++) {
DatanodeDescriptor target = pipeline[i].getDatanodeDescriptor();
if (rackA.contains(target)) {
foundOneOnRackA = true;
}
assertFalse(decomNodes.contains(target));
assertFalse(origNodes.contains(target));
}
assertTrue("Should have at least one target on rack A. Pipeline: " +
Joiner.on(",").join(pipeline),
foundOneOnRackA);
}
/**
* Test what happens when a block is on three nodes, and all three of those
* nodes are decommissioned. It should properly re-replicate to three new
* nodes.
*/
@Test
public void testAllNodesHoldingReplicasDecommissioned() throws Exception {
addNodes(nodes);
for (int i = 0; i < NUM_TEST_ITERS; i++) {
doTestAllNodesHoldingReplicasDecommissioned(i);
}
}
private void doTestAllNodesHoldingReplicasDecommissioned(int testIndex) throws Exception {
// Block originally on A1, A2, B1
List<DatanodeStorageInfo> origStorages = getStorages(0, 1, 3);
List<DatanodeDescriptor> origNodes = getNodes(origStorages);
BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
// Decommission all of the nodes
List<DatanodeDescriptor> decomNodes = startDecommission(0, 1, 3);
DatanodeStorageInfo[] pipeline = scheduleSingleReplication(blockInfo);
assertTrue("Source of replication should be one of the nodes the block " +
"was on. Was: " + pipeline[0],
origStorages.contains(pipeline[0]));
assertEquals("Should have three targets", 4, pipeline.length);
boolean foundOneOnRackA = false;
boolean foundOneOnRackB = false;
for (int i = 1; i < pipeline.length; i++) {
DatanodeDescriptor target = pipeline[i].getDatanodeDescriptor();
if (rackA.contains(target)) {
foundOneOnRackA = true;
} else if (rackB.contains(target)) {
foundOneOnRackB = true;
}
assertFalse(decomNodes.contains(target));
assertFalse(origNodes.contains(target));
}
assertTrue("Should have at least one target on rack A. Pipeline: " +
Joiner.on(",").join(pipeline),
foundOneOnRackA);
assertTrue("Should have at least one target on rack B. Pipeline: " +
Joiner.on(",").join(pipeline),
foundOneOnRackB);
}
/**
* Test what happens when there are two racks, and an entire rack is
* decommissioned.
*
* Since the cluster is multi-rack, it will consider the block
* under-replicated rather than create a third replica on the
* same rack. Adding a new node on a third rack should cause re-replication
* to that node.
*/
@Test
public void testOneOfTwoRacksDecommissioned() throws Exception {
addNodes(nodes);
for (int i = 0; i < NUM_TEST_ITERS; i++) {
doTestOneOfTwoRacksDecommissioned(i);
}
}
private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
// Block originally on A1, A2, B1
List<DatanodeStorageInfo> origStorages = getStorages(0, 1, 3);
List<DatanodeDescriptor> origNodes = getNodes(origStorages);
BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
// Decommission all of the nodes in rack A
List<DatanodeDescriptor> decomNodes = startDecommission(0, 1, 2);
DatanodeStorageInfo[] pipeline = scheduleSingleReplication(blockInfo);
assertTrue("Source of replication should be one of the nodes the block " +
"was on. Was: " + pipeline[0],
origStorages.contains(pipeline[0]));
// Only up to two nodes can be picked per rack when there are two racks.
assertEquals("Should have two targets", 2, pipeline.length);
boolean foundOneOnRackB = false;
for (int i = 1; i < pipeline.length; i++) {
DatanodeDescriptor target = pipeline[i].getDatanodeDescriptor();
if (rackB.contains(target)) {
foundOneOnRackB = true;
}
assertFalse(decomNodes.contains(target));
assertFalse(origNodes.contains(target));
}
assertTrue("Should have at least one target on rack B. Pipeline: " +
Joiner.on(",").join(pipeline),
foundOneOnRackB);
// Mark the block as received on the target nodes in the pipeline
fulfillPipeline(blockInfo, pipeline);
// the block is still under-replicated. Add a new node. This should allow
// the third off-rack replica.
DatanodeDescriptor rackCNode =
DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/rackC");
rackCNode.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
addNodes(ImmutableList.of(rackCNode));
try {
DatanodeStorageInfo[] pipeline2 = scheduleSingleReplication(blockInfo);
assertEquals(2, pipeline2.length);
assertEquals(rackCNode, pipeline2[1].getDatanodeDescriptor());
} finally {
removeNode(rackCNode);
}
}
/**
* Unit test version of testSufficientlyReplBlocksUsesNewRack from
* {@link TestBlocksWithNotEnoughRacks}.
**/
@Test
public void testSufficientlyReplBlocksUsesNewRack() throws Exception {
addNodes(nodes);
for (int i = 0; i < NUM_TEST_ITERS; i++) {
doTestSufficientlyReplBlocksUsesNewRack(i);
}
}
private void doTestSufficientlyReplBlocksUsesNewRack(int testIndex) {
// Originally on only nodes in rack A.
List<DatanodeDescriptor> origNodes = rackA;
BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
DatanodeStorageInfo pipeline[] = scheduleSingleReplication(blockInfo);
assertEquals(2, pipeline.length); // single new copy
assertTrue("Source of replication should be one of the nodes the block " +
"was on. Was: " + pipeline[0],
origNodes.contains(pipeline[0].getDatanodeDescriptor()));
assertTrue("Destination of replication should be on the other rack. " +
"Was: " + pipeline[1],
rackB.contains(pipeline[1].getDatanodeDescriptor()));
}
@Test
public void testBlocksAreNotUnderreplicatedInSingleRack() throws Exception {
List<DatanodeDescriptor> nodes = ImmutableList.of(
BlockManagerTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA", true),
BlockManagerTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA", true),
BlockManagerTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA", true),
BlockManagerTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackA", true),
BlockManagerTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackA", true),
BlockManagerTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackA", true)
);
addNodes(nodes);
List<DatanodeDescriptor> origNodes = nodes.subList(0, 3);
for (int i = 0; i < NUM_TEST_ITERS; i++) {
doTestSingleRackClusterIsSufficientlyReplicated(i, origNodes);
}
}
private void doTestSingleRackClusterIsSufficientlyReplicated(int testIndex,
List<DatanodeDescriptor> origNodes)
throws Exception {
assertEquals(0, bm.numOfUnderReplicatedBlocks());
addBlockOnNodes(testIndex, origNodes);
bm.processMisReplicatedBlocks();
assertEquals(0, bm.numOfUnderReplicatedBlocks());
}
/**
* Tell the block manager that replication is completed for the given
* pipeline.
*/
private void fulfillPipeline(BlockInfo blockInfo,
DatanodeStorageInfo[] pipeline) throws IOException {
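    // pipeline[0] is the replication source, so start at index 1 and record
    // the block as stored on every scheduled target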
for (int i = 1; i < pipeline.length; i++) {
DatanodeStorageInfo storage = pipeline[i];
bm.addBlock(storage, blockInfo, null);
blockInfo.addStorage(storage);
}
}
private BlockInfo blockOnNodes(long blkId, List<DatanodeDescriptor> nodes) {
Block block = new Block(blkId);
BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 3);
for (DatanodeDescriptor dn : nodes) {
for (DatanodeStorageInfo storage : dn.getStorageInfos()) {
blockInfo.addStorage(storage);
}
}
return blockInfo;
}
private List<DatanodeDescriptor> getNodes(int ... indexes) {
List<DatanodeDescriptor> ret = Lists.newArrayList();
for (int idx : indexes) {
ret.add(nodes.get(idx));
}
return ret;
}
private List<DatanodeDescriptor> getNodes(List<DatanodeStorageInfo> storages) {
List<DatanodeDescriptor> ret = Lists.newArrayList();
for (DatanodeStorageInfo s : storages) {
ret.add(s.getDatanodeDescriptor());
}
return ret;
}
private List<DatanodeStorageInfo> getStorages(int ... indexes) {
List<DatanodeStorageInfo> ret = Lists.newArrayList();
for (int idx : indexes) {
ret.add(storages[idx]);
}
return ret;
}
private List<DatanodeDescriptor> startDecommission(int ... indexes) {
List<DatanodeDescriptor> nodes = getNodes(indexes);
for (DatanodeDescriptor node : nodes) {
node.startDecommission();
}
return nodes;
}
private BlockInfo addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
BlockCollection bc = Mockito.mock(BlockCollection.class);
Mockito.doReturn((short)3).when(bc).getPreferredBlockReplication();
BlockInfo blockInfo = blockOnNodes(blockId, nodes);
bm.blocksMap.addBlockCollection(blockInfo, bc);
return blockInfo;
}
private DatanodeStorageInfo[] scheduleSingleReplication(BlockInfo block) {
// list for priority 1
List<BlockInfo> list_p1 = new ArrayList<>();
list_p1.add(block);
// list of lists for each priority
List<List<BlockInfo>> list_all = new ArrayList<>();
list_all.add(new ArrayList<BlockInfo>()); // for priority 0
list_all.add(list_p1); // for priority 1
assertEquals("Block not initially pending replication", 0,
bm.pendingReplications.getNumReplicas(block));
assertEquals(
"computeReplicationWork should indicate replication is needed", 1,
bm.computeReplicationWorkForBlocks(list_all));
assertTrue("replication is pending after work is computed",
bm.pendingReplications.getNumReplicas(block) > 0);
LinkedListMultimap<DatanodeStorageInfo, BlockTargetPair> repls = getAllPendingReplications();
assertEquals(1, repls.size());
Entry<DatanodeStorageInfo, BlockTargetPair> repl =
repls.entries().iterator().next();
DatanodeStorageInfo[] targets = repl.getValue().targets;
DatanodeStorageInfo[] pipeline = new DatanodeStorageInfo[1 + targets.length];
pipeline[0] = repl.getKey();
System.arraycopy(targets, 0, pipeline, 1, targets.length);
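    // pipeline[0] is the chosen source; the remaining entries are the targets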
return pipeline;
}
private LinkedListMultimap<DatanodeStorageInfo, BlockTargetPair> getAllPendingReplications() {
LinkedListMultimap<DatanodeStorageInfo, BlockTargetPair> repls =
LinkedListMultimap.create();
for (DatanodeDescriptor dn : nodes) {
List<BlockTargetPair> thisRepls = dn.getReplicationCommand(10);
if (thisRepls != null) {
for(DatanodeStorageInfo storage : dn.getStorageInfos()) {
repls.putAll(storage, thisRepls);
}
}
}
return repls;
}
/**
* Test that a source node for a highest-priority replication is chosen even if all available
* source nodes have reached their replication limits.
*/
@Test
public void testHighestPriReplSrcChosenDespiteMaxReplLimit() throws Exception {
bm.maxReplicationStreams = 0;
bm.replicationStreamsHardLimit = 1;
long blockId = 42; // arbitrary
Block aBlock = new Block(blockId, 0, 0);
List<DatanodeDescriptor> origNodes = getNodes(0, 1);
// Add the block to the first node.
addBlockOnNodes(blockId,origNodes.subList(0,1));
List<DatanodeDescriptor> cntNodes = new LinkedList<DatanodeDescriptor>();
List<DatanodeStorageInfo> liveNodes = new LinkedList<DatanodeStorageInfo>();
assertNotNull("Chooses source node for a highest-priority replication"
+ " even if all available source nodes have reached their replication"
+ " limits below the hard limit.",
bm.chooseSourceDatanode(
aBlock,
cntNodes,
liveNodes,
new NumberReplicas(),
UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY));
assertNull("Does not choose a source node for a less-than-highest-priority"
+ " replication since all available source nodes have reached"
+ " their replication limits.",
bm.chooseSourceDatanode(
aBlock,
cntNodes,
liveNodes,
new NumberReplicas(),
UnderReplicatedBlocks.QUEUE_VERY_UNDER_REPLICATED));
// Increase the replication count to test replication count > hard limit
DatanodeStorageInfo targets[] = { origNodes.get(1).getStorageInfos()[0] };
origNodes.get(0).addBlockToBeReplicated(aBlock, targets);
assertNull("Does not choose a source node for a highest-priority"
+ " replication when all available nodes exceed the hard limit.",
bm.chooseSourceDatanode(
aBlock,
cntNodes,
liveNodes,
new NumberReplicas(),
UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY));
}
@Test
public void testFavorDecomUntilHardLimit() throws Exception {
bm.maxReplicationStreams = 0;
bm.replicationStreamsHardLimit = 1;
long blockId = 42; // arbitrary
Block aBlock = new Block(blockId, 0, 0);
List<DatanodeDescriptor> origNodes = getNodes(0, 1);
// Add the block to the first node.
addBlockOnNodes(blockId,origNodes.subList(0,1));
origNodes.get(0).startDecommission();
List<DatanodeDescriptor> cntNodes = new LinkedList<DatanodeDescriptor>();
List<DatanodeStorageInfo> liveNodes = new LinkedList<DatanodeStorageInfo>();
assertNotNull("Chooses decommissioning source node for a normal replication"
+ " if all available source nodes have reached their replication"
+ " limits below the hard limit.",
bm.chooseSourceDatanode(
aBlock,
cntNodes,
liveNodes,
new NumberReplicas(),
UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED));
// Increase the replication count to test replication count > hard limit
DatanodeStorageInfo targets[] = { origNodes.get(1).getStorageInfos()[0] };
origNodes.get(0).addBlockToBeReplicated(aBlock, targets);
assertNull("Does not choose a source decommissioning node for a normal"
+ " replication when all available nodes exceed the hard limit.",
bm.chooseSourceDatanode(
aBlock,
cntNodes,
liveNodes,
new NumberReplicas(),
UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED));
}
@Test
public void testSafeModeIBR() throws Exception {
DatanodeDescriptor node = spy(nodes.get(0));
DatanodeStorageInfo ds = node.getStorageInfos()[0];
node.isAlive = true;
DatanodeRegistration nodeReg =
new DatanodeRegistration(node, null, null, "");
// pretend to be in safemode
doReturn(true).when(fsn).isInStartupSafeMode();
// register new node
bm.getDatanodeManager().registerDatanode(nodeReg);
bm.getDatanodeManager().addDatanode(node); // swap in spy
assertEquals(node, bm.getDatanodeManager().getDatanode(node));
assertEquals(0, ds.getBlockReportCount());
// send block report, should be processed
reset(node);
bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
BlockListAsLongs.EMPTY, null, false);
assertEquals(1, ds.getBlockReportCount());
// send block report again, should NOT be processed
reset(node);
bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
BlockListAsLongs.EMPTY, null, false);
assertEquals(1, ds.getBlockReportCount());
// re-register as if node restarted, should update existing node
bm.getDatanodeManager().removeDatanode(node);
reset(node);
bm.getDatanodeManager().registerDatanode(nodeReg);
verify(node).updateRegInfo(nodeReg);
// send block report, should be processed after restart
reset(node);
bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
BlockListAsLongs.EMPTY, null, false);
    // Re-fetch the storage info: re-registration with an empty storage list
    // pruned node.storageMap.
ds = node.getStorageInfos()[0];
assertEquals(1, ds.getBlockReportCount());
}
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
DatanodeDescriptor node = spy(nodes.get(0));
DatanodeStorageInfo ds = node.getStorageInfos()[0];
node.isAlive = true;
DatanodeRegistration nodeReg =
new DatanodeRegistration(node, null, null, "");
// pretend to be in safemode
doReturn(true).when(fsn).isInStartupSafeMode();
// register new node
bm.getDatanodeManager().registerDatanode(nodeReg);
bm.getDatanodeManager().addDatanode(node); // swap in spy
assertEquals(node, bm.getDatanodeManager().getDatanode(node));
assertEquals(0, ds.getBlockReportCount());
// send block report while pretending to already have blocks
reset(node);
doReturn(1).when(node).numBlocks();
bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
BlockListAsLongs.EMPTY, null, false);
assertEquals(1, ds.getBlockReportCount());
}
/**
   * Test the case where the NN starts in safe mode and receives an incremental
   * block report first, followed by its first full block report.
*/
@Test
public void testSafeModeIBRBeforeFirstFullBR() throws Exception {
// pretend to be in safemode
doReturn(true).when(fsn).isInStartupSafeMode();
DatanodeDescriptor node = nodes.get(0);
DatanodeStorageInfo ds = node.getStorageInfos()[0];
node.isAlive = true;
DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, "");
// register new node
bm.getDatanodeManager().registerDatanode(nodeReg);
bm.getDatanodeManager().addDatanode(node);
assertEquals(node, bm.getDatanodeManager().getDatanode(node));
assertEquals(0, ds.getBlockReportCount());
    // Build an incremental report
List<ReceivedDeletedBlockInfo> rdbiList = new ArrayList<>();
// Build a full report
BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
// blk_42 is finalized.
long receivedBlockId = 42; // arbitrary
BlockInfo receivedBlock = addBlockToBM(receivedBlockId);
rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedBlock),
ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
builder.add(new FinalizedReplica(receivedBlock, null, null));
// blk_43 is under construction.
long receivingBlockId = 43;
BlockInfo receivingBlock = addUcBlockToBM(receivingBlockId);
rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingBlock),
ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
builder.add(new ReplicaBeingWritten(receivingBlock, null, null, null));
// blk_44 has 2 records in IBR. It's finalized. So full BR has 1 record.
long receivingReceivedBlockId = 44;
BlockInfo receivingReceivedBlock = addBlockToBM(receivingReceivedBlockId);
rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),
ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),
ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
builder.add(new FinalizedReplica(receivingReceivedBlock, null, null));
// blk_45 is not in full BR, because it's deleted.
long ReceivedDeletedBlockId = 45;
rdbiList.add(new ReceivedDeletedBlockInfo(
new Block(ReceivedDeletedBlockId),
ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
rdbiList.add(new ReceivedDeletedBlockInfo(
new Block(ReceivedDeletedBlockId),
ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null));
// blk_46 exists in DN for a long time, so it's in full BR, but not in IBR.
long existedBlockId = 46;
BlockInfo existedBlock = addBlockToBM(existedBlockId);
builder.add(new FinalizedReplica(existedBlock, null, null));
// process IBR and full BR
StorageReceivedDeletedBlocks srdb =
new StorageReceivedDeletedBlocks(new DatanodeStorage(ds.getStorageID()),
rdbiList.toArray(new ReceivedDeletedBlockInfo[rdbiList.size()]));
bm.processIncrementalBlockReport(node, srdb);
// Make sure it's the first full report
assertEquals(0, ds.getBlockReportCount());
bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
builder.build(), null, false);
assertEquals(1, ds.getBlockReportCount());
// verify the storage info is correct
assertTrue(bm.getStoredBlock(new Block(receivedBlockId)).findStorageInfo
(ds) >= 0);
assertTrue(((BlockInfoContiguousUnderConstruction) bm.
getStoredBlock(new Block(receivingBlockId))).getNumExpectedLocations() > 0);
assertTrue(bm.getStoredBlock(new Block(receivingReceivedBlockId))
.findStorageInfo(ds) >= 0);
assertNull(bm.getStoredBlock(new Block(ReceivedDeletedBlockId)));
assertTrue(bm.getStoredBlock(new Block(existedBlock)).findStorageInfo
(ds) >= 0);
}
private BlockInfo addBlockToBM(long blkId) {
Block block = new Block(blkId);
BlockInfo blockInfo =
new BlockInfoContiguous(block, (short) 3);
BlockCollection bc = Mockito.mock(BlockCollection.class);
Mockito.doReturn((short) 3).when(bc).getPreferredBlockReplication();
bm.blocksMap.addBlockCollection(blockInfo, bc);
return blockInfo;
}
private BlockInfo addUcBlockToBM(long blkId) {
Block block = new Block(blkId);
BlockInfoContiguousUnderConstruction blockInfo =
new BlockInfoContiguousUnderConstruction(block, (short) 3);
BlockCollection bc = Mockito.mock(BlockCollection.class);
Mockito.doReturn((short) 3).when(bc).getPreferredBlockReplication();
bm.blocksMap.addBlockCollection(blockInfo, bc);
return blockInfo;
}
/**
* Tests that a namenode doesn't choose a datanode with full disks to
* store blocks.
* @throws Exception
*/
@Test
public void testStorageWithRemainingCapacity() throws Exception {
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = FileSystem.get(conf);
Path file1 = null;
try {
cluster.waitActive();
final FSNamesystem namesystem = cluster.getNamesystem();
final String poolId = namesystem.getBlockPoolId();
final DatanodeRegistration nodeReg =
DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().
get(0), poolId);
final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem,
nodeReg);
      // By default, MiniDFSCluster will create 1 datanode with 2 storages.
      // Assign 64k of remaining capacity to each storage, then try to
      // create a 100k file.
for(DatanodeStorageInfo storage: dd.getStorageInfos()) {
storage.setUtilizationForTesting(65536, 0, 65536, 0);
}
      // sum of the remaining capacity of both storages
dd.setRemaining(131072);
file1 = new Path("testRemainingStorage.dat");
try {
DFSTestUtil.createFile(fs, file1, 102400, 102400, 102400, (short)1,
0x1BAD5EED);
}
catch (RemoteException re) {
GenericTestUtils.assertExceptionContains("nodes instead of "
+ "minReplication", re);
}
}
finally {
// Clean up
assertTrue(fs.exists(file1));
fs.delete(file1, true);
assertTrue(!fs.exists(file1));
cluster.shutdown();
}
}
@Test
public void testUseDelHint() {
DatanodeStorageInfo delHint = new DatanodeStorageInfo(
DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("id"));
List<DatanodeStorageInfo> moreThan1Racks = Arrays.asList(delHint);
List<StorageType> excessTypes = new ArrayList<StorageType>();
excessTypes.add(StorageType.DEFAULT);
Assert.assertTrue(BlockManager.useDelHint(true, delHint, null,
moreThan1Racks, excessTypes));
excessTypes.remove(0);
excessTypes.add(StorageType.SSD);
Assert.assertFalse(BlockManager.useDelHint(true, delHint, null,
moreThan1Racks, excessTypes));
}
}
| 31,836 | 37.778319 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mortbay.util.ajax.JSON;
/**
* Class for testing {@link BlockStatsMXBean} implementation
*/
public class TestBlockStatsMXBean {
private MiniDFSCluster cluster;
@Before
public void setup() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
cluster = null;
StorageType[][] types = new StorageType[6][];
for (int i=0; i<3; i++) {
types[i] = new StorageType[] {StorageType.RAM_DISK, StorageType.DISK};
}
for (int i=3; i< 5; i++) {
types[i] = new StorageType[] {StorageType.RAM_DISK, StorageType.ARCHIVE};
}
types[5] = new StorageType[] {StorageType.RAM_DISK, StorageType.ARCHIVE,
StorageType.ARCHIVE};
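    // Resulting mix: all 6 nodes have RAM_DISK, nodes 0-2 have DISK and
    // nodes 3-5 have ARCHIVE, which the assertions below depend on.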
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(6).
storageTypes(types).storagesPerDatanode(3).build();
cluster.waitActive();
}
@After
public void tearDown() {
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testStorageTypeStats() throws Exception {
Map<StorageType, StorageTypeStats> storageTypeStatsMap =
cluster.getNamesystem().getBlockManager().getStorageTypeStats();
assertTrue(storageTypeStatsMap.containsKey(StorageType.RAM_DISK));
assertTrue(storageTypeStatsMap.containsKey(StorageType.DISK));
assertTrue(storageTypeStatsMap.containsKey(StorageType.ARCHIVE));
StorageTypeStats storageTypeStats =
storageTypeStatsMap.get(StorageType.RAM_DISK);
assertEquals(6, storageTypeStats.getNodesInService());
storageTypeStats = storageTypeStatsMap.get(StorageType.DISK);
assertEquals(3, storageTypeStats.getNodesInService());
storageTypeStats = storageTypeStatsMap.get(StorageType.ARCHIVE);
assertEquals(3, storageTypeStats.getNodesInService());
}
protected static String readOutput(URL url) throws IOException {
StringBuilder out = new StringBuilder();
InputStream in = url.openConnection().getInputStream();
byte[] buffer = new byte[64 * 1024];
int len = in.read(buffer);
while (len > 0) {
out.append(new String(buffer, 0, len));
len = in.read(buffer);
}
return out.toString();
}
@Test
@SuppressWarnings("unchecked")
public void testStorageTypeStatsJMX() throws Exception {
URL baseUrl = new URL (cluster.getHttpUri(0));
String result = readOutput(new URL(baseUrl, "/jmx"));
Map<String, Object> stat = (Map<String, Object>) JSON.parse(result);
Object[] beans =(Object[]) stat.get("beans");
Map<String, Object> blockStats = null;
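    // locate the BlockStats MBean among all beans exposed via /jmx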
for (Object bean : beans) {
Map<String, Object> map = (Map<String, Object>) bean;
if (map.get("name").equals("Hadoop:service=NameNode,name=BlockStats")) {
blockStats = map;
}
}
assertNotNull(blockStats);
Object[] storageTypeStatsList =
(Object[])blockStats.get("StorageTypeStats");
assertNotNull(storageTypeStatsList);
assertEquals (3, storageTypeStatsList.length);
Set<String> typesPresent = new HashSet<> ();
for (Object obj : storageTypeStatsList) {
Map<String, Object> entry = (Map<String, Object>)obj;
String storageType = (String)entry.get("key");
Map<String,Object> storageTypeStats = (Map<String,Object>)entry.get("value");
typesPresent.add(storageType);
if (storageType.equals("ARCHIVE") || storageType.equals("DISK") ) {
assertEquals(3l, storageTypeStats.get("nodesInService"));
} else if (storageType.equals("RAM_DISK")) {
assertEquals(6l, storageTypeStats.get("nodesInService"));
}
else {
fail();
}
}
assertTrue(typesPresent.contains("ARCHIVE"));
assertTrue(typesPresent.contains("DISK"));
assertTrue(typesPresent.contains("RAM_DISK"));
}
}
| 5,132 | 34.157534 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.apache.hadoop.util.Time.monotonicNow;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.util.Time;
import org.junit.Test;
public class TestOverReplicatedBlocks {
/** Test processOverReplicatedBlock can handle corrupt replicas fine.
   * It makes sure that corrupt replicas are not treated as valid ones,
   * which would otherwise lead the NN to delete valid replicas while
   * keeping corrupt ones.
*/
@Test
public void testProcesOverReplicateBlock() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(
DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
Integer.toString(2));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = cluster.getFileSystem();
try {
final Path fileName = new Path("/foo1");
DFSTestUtil.createFile(fs, fileName, 2, (short)3, 0L);
DFSTestUtil.waitReplication(fs, fileName, (short)3);
// corrupt the block on datanode 0
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
assertTrue(cluster.corruptReplica(0, block));
DataNodeProperties dnProps = cluster.stopDataNode(0);
      // remove the block scanner cursor file so the corrupt replica is rescanned
File scanCursor = new File(new File(MiniDFSCluster.getFinalizedDir(
cluster.getInstanceStorageDir(0, 0),
cluster.getNamesystem().getBlockPoolId()).getParent()).getParent(),
"scanner.cursor");
      // wait up to one minute for the deletion to succeed
for(int i = 0; !scanCursor.delete(); i++) {
assertTrue("Could not delete " + scanCursor.getAbsolutePath() +
" in one minute", i < 60);
try {
Thread.sleep(1000);
} catch (InterruptedException ignored) {}
}
// restart the datanode so the corrupt replica will be detected
cluster.restartDataNode(dnProps);
DFSTestUtil.waitReplication(fs, fileName, (short)2);
String blockPoolId = cluster.getNamesystem().getBlockPoolId();
final DatanodeID corruptDataNode =
DataNodeTestUtils.getDNRegistrationForBP(
cluster.getDataNodes().get(2), blockPoolId);
final FSNamesystem namesystem = cluster.getNamesystem();
final BlockManager bm = namesystem.getBlockManager();
final HeartbeatManager hm = bm.getDatanodeManager().getHeartbeatManager();
try {
namesystem.writeLock();
synchronized(hm) {
          // set the remaining space of the live datanodes to 0
          // so they will be chosen to be deleted when over-replication occurs
String corruptMachineName = corruptDataNode.getXferAddr();
for (DatanodeDescriptor datanode : hm.getDatanodes()) {
if (!corruptMachineName.equals(datanode.getXferAddr())) {
datanode.getStorageInfos()[0].setUtilizationForTesting(100L, 100L, 0, 100L);
datanode.updateHeartbeat(
BlockManagerTestUtil.getStorageReportsForDatanode(datanode),
0L, 0L, 0, 0, null);
}
}
// decrease the replication factor to 1;
NameNodeAdapter.setReplication(namesystem, fileName.toString(), (short)1);
        // the corrupt replica won't be chosen as the excess one;
        // without 4910 the number of live replicas would be 0 and the block would be lost
assertEquals(1, bm.countNodes(
bm.getStoredBlock(block.getLocalBlock())).liveReplicas());
}
} finally {
namesystem.writeUnlock();
}
} finally {
cluster.shutdown();
}
}
static final long SMALL_BLOCK_SIZE =
DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
static final long SMALL_FILE_LENGTH = SMALL_BLOCK_SIZE * 4;
  /**
   * The test verifies that the replica chosen for deletion is on the node
   * with the oldest heartbeat, when that heartbeat is older than the
   * tolerable heartbeat interval.
   * It creates a file with several blocks and replication 4.
   * The last DN is configured to send heartbeats rarely.
   *
   * The test waits until the tolerable heartbeat interval expires, and then
   * reduces the replication of the file. All replica deletions should be
   * scheduled for the last node. No replicas will actually be deleted, since
   * the last DN doesn't send heartbeats.
   */
@Test
public void testChooseReplicaToDelete() throws Exception {
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK_SIZE);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
fs = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300);
cluster.startDataNodes(conf, 1, true, null, null, null);
DataNode lastDN = cluster.getDataNodes().get(3);
DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
lastDN, namesystem.getBlockPoolId());
String lastDNid = dnReg.getDatanodeUuid();
final Path fileName = new Path("/foo2");
DFSTestUtil.createFile(fs, fileName, SMALL_FILE_LENGTH, (short)4, 0L);
DFSTestUtil.waitReplication(fs, fileName, (short)4);
// Wait for tolerable number of heartbeats plus one
DatanodeDescriptor nodeInfo = null;
long lastHeartbeat = 0;
long waitTime = DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 *
(DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
do {
nodeInfo = namesystem.getBlockManager().getDatanodeManager()
.getDatanode(dnReg);
lastHeartbeat = nodeInfo.getLastUpdateMonotonic();
} while (monotonicNow() - lastHeartbeat < waitTime);
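      // At this point the last DN's heartbeat is older than the tolerable
      // interval, so the placement policy should prefer it when choosing
      // replicas to delete; the assertions below check exactly that.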
fs.setReplication(fileName, (short)3);
BlockLocation locs[] = fs.getFileBlockLocations(
fs.getFileStatus(fileName), 0, Long.MAX_VALUE);
// All replicas for deletion should be scheduled on lastDN.
// And should not actually be deleted, because lastDN does not heartbeat.
namesystem.readLock();
Collection<Block> dnBlocks =
namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
assertEquals("Replicas on node " + lastDNid + " should have been deleted",
SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks.size());
namesystem.readUnlock();
for(BlockLocation location : locs)
assertEquals("Block should still have 4 replicas",
4, location.getNames().length);
} finally {
if(fs != null) fs.close();
if(cluster != null) cluster.shutdown();
}
}
/**
   * Test that an over-replicated block gets invalidated when decreasing the
   * replication factor on a partial (under-construction) block.
*/
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
.build();
try {
final FSNamesystem namesystem = cluster.getNamesystem();
final BlockManager bm = namesystem.getBlockManager();
FileSystem fs = cluster.getFileSystem();
Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
FSDataOutputStream out = fs.create(p, (short) 2);
out.writeBytes("HDFS-3119: " + p);
out.hsync();
fs.setReplication(p, (short) 1);
out.close();
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
assertEquals("Expected only one live replica for the block", 1, bm
.countNodes(bm.getStoredBlock(block.getLocalBlock())).liveReplicas());
} finally {
cluster.shutdown();
}
}
}
| 9,792 | 41.578261 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
/**
 * This class provides tests for the BlockInfo class, which is used in BlocksMap.
 * The tests cover BlockList.listMoveToHead, used for faster block report
* processing in DatanodeDescriptor.reportDiff.
*/
public class TestBlockInfo {
private static final Log LOG = LogFactory
.getLog("org.apache.hadoop.hdfs.TestBlockInfo");
@Test
public void testIsDeleted() {
BlockInfo blockInfo = new BlockInfoContiguous((short) 3);
BlockCollection bc = Mockito.mock(BlockCollection.class);
blockInfo.setBlockCollection(bc);
Assert.assertFalse(blockInfo.isDeleted());
blockInfo.setBlockCollection(null);
Assert.assertTrue(blockInfo.isDeleted());
}
@Test
public void testAddStorage() throws Exception {
BlockInfo blockInfo = new BlockInfoContiguous((short) 3);
final DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
boolean added = blockInfo.addStorage(storage);
Assert.assertTrue(added);
Assert.assertEquals(storage, blockInfo.getStorageInfo(0));
}
@Test
public void testCopyConstructor() {
BlockInfo old = new BlockInfoContiguous((short) 3);
try {
BlockInfo copy = new BlockInfoContiguous((BlockInfoContiguous)old);
assertEquals(old.getBlockCollection(), copy.getBlockCollection());
assertEquals(old.getCapacity(), copy.getCapacity());
} catch (Exception e) {
Assert.fail("Copy constructor throws exception: " + e);
}
}
@Test
public void testReplaceStorage() throws Exception {
// Create two dummy storages.
final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
final int NUM_BLOCKS = 10;
BlockInfo[] blockInfos = new BlockInfo[NUM_BLOCKS];
// Create a few dummy blocks and add them to the first storage.
for (int i = 0; i < NUM_BLOCKS; ++i) {
blockInfos[i] = new BlockInfoContiguous((short) 3);
storage1.addBlock(blockInfos[i]);
}
// Try to move one of the blocks to a different storage.
boolean added =
storage2.addBlock(blockInfos[NUM_BLOCKS / 2]) == AddBlockResult.ADDED;
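    // Expected behaviour (as the assertions below suggest): since the block
    // already lives on another storage of the same datanode, the result is
    // not ADDED, but the block's storage reference is switched to storage2.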
Assert.assertThat(added, is(false));
Assert.assertThat(blockInfos[NUM_BLOCKS/2].getStorageInfo(0), is(storage2));
}
@Test
public void testBlockListMoveToHead() throws Exception {
LOG.info("BlockInfo moveToHead tests...");
final int MAX_BLOCKS = 10;
DatanodeStorageInfo dd = DFSTestUtil.createDatanodeStorageInfo("s1", "1.1.1.1");
ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
ArrayList<BlockInfo> blockInfoList = new ArrayList<BlockInfo>();
int headIndex;
int curIndex;
LOG.info("Building block list...");
for (int i = 0; i < MAX_BLOCKS; i++) {
blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
blockInfoList.add(new BlockInfoContiguous(blockList.get(i), (short) 3));
dd.addBlock(blockInfoList.get(i));
// index of the datanode should be 0
assertEquals("Find datanode should be 0", 0, blockInfoList.get(i)
.findStorageInfo(dd));
}
// list length should be equal to the number of blocks we inserted
LOG.info("Checking list length...");
assertEquals("Length should be MAX_BLOCK", MAX_BLOCKS, dd.numBlocks());
Iterator<BlockInfo> it = dd.getBlockIterator();
int len = 0;
while (it.hasNext()) {
it.next();
len++;
}
assertEquals("There should be MAX_BLOCK blockInfo's", MAX_BLOCKS, len);
headIndex = dd.getBlockListHeadForTesting().findStorageInfo(dd);
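    // headIndex/curIndex are the positions of this storage within each
    // block's internal storage array (as returned by findStorageInfo);
    // moveBlockToHead uses them to relink the per-storage block list.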
LOG.info("Moving each block to the head of the list...");
for (int i = 0; i < MAX_BLOCKS; i++) {
curIndex = blockInfoList.get(i).findStorageInfo(dd);
headIndex = dd.moveBlockToHead(blockInfoList.get(i), curIndex, headIndex);
// the moved element must be at the head of the list
assertEquals("Block should be at the head of the list now.",
blockInfoList.get(i), dd.getBlockListHeadForTesting());
}
// move head of the list to the head - this should not change the list
LOG.info("Moving head to the head...");
BlockInfo temp = dd.getBlockListHeadForTesting();
curIndex = 0;
headIndex = 0;
dd.moveBlockToHead(temp, curIndex, headIndex);
assertEquals(
"Moving head to the head of the list shopuld not change the list",
temp, dd.getBlockListHeadForTesting());
// check all elements of the list against the original blockInfoList
LOG.info("Checking elements of the list...");
temp = dd.getBlockListHeadForTesting();
assertNotNull("Head should not be null", temp);
int c = MAX_BLOCKS - 1;
while (temp != null) {
assertEquals("Expected element is not on the list",
blockInfoList.get(c--), temp);
temp = temp.getNext(0);
}
LOG.info("Moving random blocks to the head of the list...");
headIndex = dd.getBlockListHeadForTesting().findStorageInfo(dd);
Random rand = new Random();
for (int i = 0; i < MAX_BLOCKS; i++) {
int j = rand.nextInt(MAX_BLOCKS);
curIndex = blockInfoList.get(j).findStorageInfo(dd);
headIndex = dd.moveBlockToHead(blockInfoList.get(j), curIndex, headIndex);
// the moved element must be at the head of the list
assertEquals("Block should be at the head of the list now.",
blockInfoList.get(j), dd.getBlockListHeadForTesting());
}
}
}
| 7,102 | 37.188172 | 134 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import java.util.Queue;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.junit.Test;
import com.google.common.base.Joiner;
public class TestPendingDataNodeMessages {
final PendingDataNodeMessages msgs = new PendingDataNodeMessages();
private final Block block1Gs1 = new Block(1, 0, 1);
private final Block block1Gs2 = new Block(1, 0, 2);
private final Block block1Gs2DifferentInstance =
new Block(1, 0, 2);
private final Block block2Gs1 = new Block(2, 0, 1);
@Test
public void testQueues() {
DatanodeDescriptor fakeDN = DFSTestUtil.getLocalDatanodeDescriptor();
DatanodeStorage storage = new DatanodeStorage("STORAGE_ID");
DatanodeStorageInfo storageInfo = new DatanodeStorageInfo(fakeDN, storage);
msgs.enqueueReportedBlock(storageInfo, block1Gs1, ReplicaState.FINALIZED);
msgs.enqueueReportedBlock(storageInfo, block1Gs2, ReplicaState.FINALIZED);
assertEquals(2, msgs.count());
// Nothing queued yet for block 2
assertNull(msgs.takeBlockQueue(block2Gs1));
assertEquals(2, msgs.count());
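    // The pending-message queue is keyed by block ID, so a different Block
    // instance with the same ID (regardless of generation stamp) drains both
    // queued reports at once, as verified below.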
Queue<ReportedBlockInfo> q =
msgs.takeBlockQueue(block1Gs2DifferentInstance);
assertEquals(
"ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:50010, reportedState=FINALIZED]," +
"ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:50010, reportedState=FINALIZED]",
Joiner.on(",").join(q));
assertEquals(0, msgs.count());
// Should be null if we pull again
assertNull(msgs.takeBlockQueue(block1Gs1));
assertEquals(0, msgs.count());
}
}
| 2,791 | 38.323944 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.util.UUID;
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.util.VersionInfo;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
/**
 * Test that the BlockManager can compute and schedule block invalidation
 * work correctly.
*/
public class TestComputeInvalidateWork {
private Configuration conf;
private final int NUM_OF_DATANODES = 3;
private MiniDFSCluster cluster;
private FSNamesystem namesystem;
private BlockManager bm;
private DatanodeDescriptor[] nodes;
@Before
public void setup() throws Exception {
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES)
.build();
cluster.waitActive();
namesystem = cluster.getNamesystem();
bm = namesystem.getBlockManager();
nodes = bm.getDatanodeManager().getHeartbeatManager().getDatanodes();
assertEquals(nodes.length, NUM_OF_DATANODES);
}
@After
public void teardown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Test if {@link BlockManager#computeInvalidateWork(int)}
* can schedule invalidate work correctly
*/
@Test(timeout=120000)
public void testCompInvalidate() throws Exception {
final int blockInvalidateLimit = bm.getDatanodeManager()
.blockInvalidateLimit;
namesystem.writeLock();
try {
for (int i=0; i<nodes.length; i++) {
for(int j=0; j<3*blockInvalidateLimit+1; j++) {
Block block = new Block(i*(blockInvalidateLimit+1)+j, 0,
GenerationStamp.LAST_RESERVED_STAMP);
bm.addToInvalidates(block, nodes[i]);
}
}
assertEquals(blockInvalidateLimit*NUM_OF_DATANODES,
bm.computeInvalidateWork(NUM_OF_DATANODES+1));
assertEquals(blockInvalidateLimit*NUM_OF_DATANODES,
bm.computeInvalidateWork(NUM_OF_DATANODES));
assertEquals(blockInvalidateLimit*(NUM_OF_DATANODES-1),
bm.computeInvalidateWork(NUM_OF_DATANODES-1));
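      // After the three calls above, two nodes have a single block left to
      // invalidate and one node still has blockInvalidateLimit+1 left.
      // computeInvalidateWork(1) picks one node, so either outcome below is
      // possible; the follow-up call accounts for whichever node was chosen.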
int workCount = bm.computeInvalidateWork(1);
if (workCount == 1) {
assertEquals(blockInvalidateLimit+1, bm.computeInvalidateWork(2));
} else {
assertEquals(workCount, blockInvalidateLimit);
assertEquals(2, bm.computeInvalidateWork(2));
}
} finally {
namesystem.writeUnlock();
}
}
/**
* Reformatted DataNodes will replace the original UUID in the
* {@link DatanodeManager#datanodeMap}. This tests if block
* invalidation work on the original DataNode can be skipped.
*/
@Test(timeout=120000)
public void testDatanodeReformat() throws Exception {
namesystem.writeLock();
try {
// Change the datanode UUID to emulate a reformat
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnr = cluster.getDataNode(nodes[0].getIpcPort())
.getDNRegistrationForBP(poolId);
dnr = new DatanodeRegistration(UUID.randomUUID().toString(), dnr);
cluster.stopDataNode(nodes[0].getXferAddr());
Block block = new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP);
bm.addToInvalidates(block, nodes[0]);
bm.getDatanodeManager().registerDatanode(dnr);
// Since UUID has changed, the invalidation work should be skipped
assertEquals(0, bm.computeInvalidateWork(1));
assertEquals(0, bm.getPendingDeletionBlocksCount());
} finally {
namesystem.writeUnlock();
}
}
@Test(timeout=12000)
public void testDatanodeReRegistration() throws Exception {
// Create a test file
final DistributedFileSystem dfs = cluster.getFileSystem();
final Path path = new Path("/testRR");
// Create a file and shutdown the DNs, which populates InvalidateBlocks
DFSTestUtil.createFile(dfs, path, dfs.getDefaultBlockSize(),
(short) NUM_OF_DATANODES, 0xED0ED0);
for (DataNode dn : cluster.getDataNodes()) {
dn.shutdown();
}
dfs.delete(path, false);
namesystem.writeLock();
InvalidateBlocks invalidateBlocks;
int expected = NUM_OF_DATANODES;
try {
invalidateBlocks = (InvalidateBlocks) Whitebox
.getInternalState(cluster.getNamesystem().getBlockManager(),
"invalidateBlocks");
assertEquals("Expected invalidate blocks to be the number of DNs",
(long) expected, invalidateBlocks.numBlocks());
} finally {
namesystem.writeUnlock();
}
// Re-register each DN and see that it wipes the invalidation work
for (DataNode dn : cluster.getDataNodes()) {
DatanodeID did = dn.getDatanodeId();
DatanodeRegistration reg = new DatanodeRegistration(
new DatanodeID(UUID.randomUUID().toString(), did),
new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
new ExportedBlockKeys(),
VersionInfo.getVersion());
namesystem.writeLock();
try {
bm.getDatanodeManager().registerDatanode(reg);
expected--;
assertEquals("Expected number of invalidate blocks to decrease",
(long) expected, invalidateBlocks.numBlocks());
} finally {
namesystem.writeUnlock();
}
}
}
}
| 6,900 | 36.505435 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.util.Daemon;
import org.junit.Assert;
import com.google.common.base.Preconditions;
public class BlockManagerTestUtil {
public static void setNodeReplicationLimit(final BlockManager blockManager,
final int limit) {
blockManager.maxReplicationStreams = limit;
}
  /** @return the datanode descriptor for the given storageID. */
public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
final String storageID) {
ns.readLock();
try {
return ns.getBlockManager().getDatanodeManager().getDatanode(storageID);
} finally {
ns.readUnlock();
}
}
/**
* Refresh block queue counts on the name-node.
*/
public static void updateState(final BlockManager blockManager) {
blockManager.updateState();
}
/**
   * @return a tuple of the replica state (number of racks, number of live
   * replicas, and whether the block still needs replication) for the given block.
*/
public static int[] getReplicaInfo(final FSNamesystem namesystem, final Block b) {
final BlockManager bm = namesystem.getBlockManager();
namesystem.readLock();
try {
final BlockInfo storedBlock = bm.getStoredBlock(b);
return new int[]{getNumberOfRacks(bm, b),
bm.countNodes(storedBlock).liveReplicas(),
bm.neededReplications.contains(storedBlock) ? 1 : 0};
} finally {
namesystem.readUnlock();
}
}
/**
   * @return the number of racks over which a given block is replicated.
   * Decommissioning/decommissioned nodes are not counted; corrupt replicas
   * are also ignored.
*/
private static int getNumberOfRacks(final BlockManager blockManager,
final Block b) {
final Set<String> rackSet = new HashSet<String>(0);
final Collection<DatanodeDescriptor> corruptNodes =
getCorruptReplicas(blockManager).getNodes(b);
for(DatanodeStorageInfo storage : blockManager.blocksMap.getStorages(b)) {
final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
if ((corruptNodes == null ) || !corruptNodes.contains(cur)) {
String rackName = cur.getNetworkLocation();
if (!rackSet.contains(rackName)) {
rackSet.add(rackName);
}
}
}
}
return rackSet.size();
}
/**
* @return replication monitor thread instance from block manager.
*/
public static Daemon getReplicationThread(final BlockManager blockManager)
{
return blockManager.replicationThread;
}
/**
* Stop the replication monitor thread
*/
public static void stopReplicationThread(final BlockManager blockManager)
throws IOException {
blockManager.enableRMTerminationForTesting();
blockManager.replicationThread.interrupt();
try {
blockManager.replicationThread.join();
} catch(InterruptedException ie) {
throw new IOException(
"Interrupted while trying to stop ReplicationMonitor");
}
}
/**
* @return corruptReplicas from block manager
*/
public static CorruptReplicasMap getCorruptReplicas(final BlockManager blockManager){
return blockManager.corruptReplicas;
}
/**
* @return computed block replication and block invalidation work that can be
* scheduled on data-nodes.
* @throws IOException
*/
public static int getComputedDatanodeWork(final BlockManager blockManager) throws IOException
{
return blockManager.computeDatanodeWork();
}
public static int computeInvalidationWork(BlockManager bm) {
return bm.computeInvalidateWork(Integer.MAX_VALUE);
}
/**
* Compute all the replication and invalidation work for the
* given BlockManager.
*
* This differs from the above functions in that it computes
* replication work for all DNs rather than a particular subset,
* regardless of invalidation/replication limit configurations.
*
* NB: you may want to set
* {@link DFSConfigKeys#DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY} to
* a high value to ensure that all work is calculated.
*/
public static int computeAllPendingWork(BlockManager bm) {
int work = computeInvalidationWork(bm);
work += bm.computeReplicationWork(Integer.MAX_VALUE);
return work;
}
/**
* Ensure that the given NameNode marks the specified DataNode as
* entirely dead/expired.
* @param nn the NameNode to manipulate
* @param dnName the name of the DataNode
*/
public static void noticeDeadDatanode(NameNode nn, String dnName) {
FSNamesystem namesystem = nn.getNamesystem();
namesystem.writeLock();
try {
DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
HeartbeatManager hbm = dnm.getHeartbeatManager();
DatanodeDescriptor[] dnds = hbm.getDatanodes();
DatanodeDescriptor theDND = null;
for (DatanodeDescriptor dnd : dnds) {
if (dnd.getXferAddr().equals(dnName)) {
theDND = dnd;
}
}
Assert.assertNotNull("Could not find DN with name: " + dnName, theDND);
synchronized (hbm) {
DFSTestUtil.setDatanodeDead(theDND);
hbm.heartbeatCheck();
}
} finally {
namesystem.writeUnlock();
}
}
/**
* Change whether the block placement policy will prefer the writer's
* local Datanode or not.
* @param prefer if true, prefer local node
*/
public static void setWritingPrefersLocalNode(
BlockManager bm, boolean prefer) {
BlockPlacementPolicy bpp = bm.getBlockPlacementPolicy();
Preconditions.checkState(bpp instanceof BlockPlacementPolicyDefault,
"Must use default policy, got %s", bpp.getClass());
((BlockPlacementPolicyDefault)bpp).setPreferLocalNode(prefer);
}
/**
* Call heartbeat check function of HeartbeatManager
* @param bm the BlockManager to manipulate
*/
public static void checkHeartbeat(BlockManager bm) {
bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
}
/**
   * Call the heartbeat check function of HeartbeatManager and get the
   * under-replicated blocks count while holding the write lock, so that
   * computeDatanodeWork doesn't interfere.
* @param namesystem the FSNamesystem
* @param bm the BlockManager to manipulate
* @return the number of under replicated blocks
*/
public static int checkHeartbeatAndGetUnderReplicatedBlocksCount(
FSNamesystem namesystem, BlockManager bm) {
namesystem.writeLock();
try {
bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
return bm.getUnderReplicatedNotMissingBlocks();
} finally {
namesystem.writeUnlock();
}
}
public static DatanodeStorageInfo updateStorage(DatanodeDescriptor dn,
DatanodeStorage s) {
return dn.updateStorage(s);
}
/**
   * Have the BlockManager rescan its postponed mis-replicated blocks.
* @param bm the BlockManager to manipulate
*/
public static void rescanPostponedMisreplicatedBlocks(BlockManager bm) {
bm.rescanPostponedMisreplicatedBlocks();
}
public static DatanodeDescriptor getLocalDatanodeDescriptor(
boolean initializeStorage) {
DatanodeDescriptor dn = new DatanodeDescriptor(DFSTestUtil.getLocalDatanodeID());
if (initializeStorage) {
dn.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
}
return dn;
}
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
String rackLocation, boolean initializeStorage) {
return getDatanodeDescriptor(ipAddr, rackLocation,
initializeStorage? new DatanodeStorage(DatanodeStorage.generateUuid()): null);
}
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
String rackLocation, DatanodeStorage storage) {
return getDatanodeDescriptor(ipAddr, rackLocation, storage, "host");
}
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
String rackLocation, DatanodeStorage storage, String hostname) {
DatanodeDescriptor dn = DFSTestUtil.getDatanodeDescriptor(ipAddr,
DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT, rackLocation, hostname);
if (storage != null) {
dn.updateStorage(storage);
}
return dn;
}
public static DatanodeStorageInfo newDatanodeStorageInfo(
DatanodeDescriptor dn, DatanodeStorage s) {
return new DatanodeStorageInfo(dn, s);
}
public static StorageReport[] getStorageReportsForDatanode(
DatanodeDescriptor dnd) {
ArrayList<StorageReport> reports = new ArrayList<StorageReport>();
for (DatanodeStorageInfo storage : dnd.getStorageInfos()) {
DatanodeStorage dns = new DatanodeStorage(
storage.getStorageID(), storage.getState(), storage.getStorageType());
StorageReport report = new StorageReport(
dns ,false, storage.getCapacity(),
storage.getDfsUsed(), storage.getRemaining(),
storage.getBlockPoolUsed());
reports.add(report);
}
return reports.toArray(StorageReport.EMPTY_ARRAY);
}
/**
* Have DatanodeManager check decommission state.
* @param dm the DatanodeManager to manipulate
*/
public static void recheckDecommissionState(DatanodeManager dm)
throws ExecutionException, InterruptedException {
dm.getDecomManager().runMonitor();
}
}
| 10,743 | 33.658065 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHostFileManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Map;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
public class TestHostFileManager {
private static InetSocketAddress entry(String e) {
return HostFileManager.parseEntry("dummy", "dummy", e);
}
@Test
public void testDeduplication() {
HostFileManager.HostSet s = new HostFileManager.HostSet();
// These entries will be de-duped, since they refer to the same IP
// address + port combo.
s.add(entry("127.0.0.1:12345"));
s.add(entry("localhost:12345"));
Assert.assertEquals(1, s.size());
s.add(entry("127.0.0.1:12345"));
Assert.assertEquals(1, s.size());
// The following entries should not be de-duped.
s.add(entry("127.0.0.1:12346"));
Assert.assertEquals(2, s.size());
s.add(entry("127.0.0.1"));
Assert.assertEquals(3, s.size());
s.add(entry("127.0.0.10"));
Assert.assertEquals(4, s.size());
}
@Test
public void testRelation() {
HostFileManager.HostSet s = new HostFileManager.HostSet();
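    // As exercised by the assertions below: match(e) asks whether the set
    // contains an entry covering e (an address-only entry covers every port
    // on that address), while matchedBy(e) asks the reverse, i.e. whether e
    // covers some entry in the set.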
s.add(entry("127.0.0.1:123"));
Assert.assertTrue(s.match(entry("127.0.0.1:123")));
Assert.assertFalse(s.match(entry("127.0.0.1:12")));
Assert.assertFalse(s.match(entry("127.0.0.1")));
Assert.assertFalse(s.matchedBy(entry("127.0.0.1:12")));
Assert.assertTrue(s.matchedBy(entry("127.0.0.1")));
Assert.assertTrue(s.matchedBy(entry("127.0.0.1:123")));
Assert.assertFalse(s.match(entry("127.0.0.2")));
Assert.assertFalse(s.match(entry("127.0.0.2:123")));
Assert.assertFalse(s.matchedBy(entry("127.0.0.2")));
Assert.assertFalse(s.matchedBy(entry("127.0.0.2:123")));
s.add(entry("127.0.0.1"));
Assert.assertTrue(s.match(entry("127.0.0.1:123")));
Assert.assertTrue(s.match(entry("127.0.0.1:12")));
Assert.assertTrue(s.match(entry("127.0.0.1")));
Assert.assertFalse(s.matchedBy(entry("127.0.0.1:12")));
Assert.assertTrue(s.matchedBy(entry("127.0.0.1")));
Assert.assertTrue(s.matchedBy(entry("127.0.0.1:123")));
Assert.assertFalse(s.match(entry("127.0.0.2")));
Assert.assertFalse(s.match(entry("127.0.0.2:123")));
Assert.assertFalse(s.matchedBy(entry("127.0.0.2")));
Assert.assertFalse(s.matchedBy(entry("127.0.0.2:123")));
s.add(entry("127.0.0.2:123"));
Assert.assertTrue(s.match(entry("127.0.0.1:123")));
Assert.assertTrue(s.match(entry("127.0.0.1:12")));
Assert.assertTrue(s.match(entry("127.0.0.1")));
Assert.assertFalse(s.matchedBy(entry("127.0.0.1:12")));
Assert.assertTrue(s.matchedBy(entry("127.0.0.1")));
Assert.assertTrue(s.matchedBy(entry("127.0.0.1:123")));
Assert.assertFalse(s.match(entry("127.0.0.2")));
Assert.assertTrue(s.match(entry("127.0.0.2:123")));
Assert.assertTrue(s.matchedBy(entry("127.0.0.2")));
Assert.assertTrue(s.matchedBy(entry("127.0.0.2:123")));
}
@Test
@SuppressWarnings("unchecked")
public void testIncludeExcludeLists() throws IOException {
BlockManager bm = mock(BlockManager.class);
FSNamesystem fsn = mock(FSNamesystem.class);
Configuration conf = new Configuration();
HostFileManager hm = mock(HostFileManager.class);
HostFileManager.HostSet includedNodes = new HostFileManager.HostSet();
HostFileManager.HostSet excludedNodes = new HostFileManager.HostSet();
includedNodes.add(entry("127.0.0.1:12345"));
includedNodes.add(entry("localhost:12345"));
includedNodes.add(entry("127.0.0.1:12345"));
includedNodes.add(entry("127.0.0.2"));
excludedNodes.add(entry("127.0.0.1:12346"));
excludedNodes.add(entry("127.0.30.1:12346"));
Assert.assertEquals(2, includedNodes.size());
Assert.assertEquals(2, excludedNodes.size());
doReturn(includedNodes).when(hm).getIncludes();
doReturn(excludedNodes).when(hm).getExcludes();
DatanodeManager dm = new DatanodeManager(bm, fsn, conf);
Whitebox.setInternalState(dm, "hostFileManager", hm);
Map<String, DatanodeDescriptor> dnMap = (Map<String,
DatanodeDescriptor>) Whitebox.getInternalState(dm, "datanodeMap");
    // After de-duplication the include list holds two entries; since neither
    // has registered yet, both show up in the ALL and DEAD reports.
Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
.DatanodeReportType.ALL).size());
Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
.DatanodeReportType.DEAD).size());
dnMap.put("uuid-foo", new DatanodeDescriptor(new DatanodeID("127.0.0.1",
"localhost", "uuid-foo", 12345, 1020, 1021, 1022)));
Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
.DatanodeReportType.DEAD).size());
dnMap.put("uuid-bar", new DatanodeDescriptor(new DatanodeID("127.0.0.2",
"127.0.0.2", "uuid-bar", 12345, 1020, 1021, 1022)));
Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants
.DatanodeReportType.DEAD).size());
DatanodeDescriptor spam = new DatanodeDescriptor(new DatanodeID("127.0.0" +
".3", "127.0.0.3", "uuid-spam", 12345, 1020, 1021, 1022));
DFSTestUtil.setDatanodeDead(spam);
includedNodes.add(entry("127.0.0.3:12345"));
dnMap.put("uuid-spam", spam);
Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
.DatanodeReportType.DEAD).size());
dnMap.remove("uuid-spam");
Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
.DatanodeReportType.DEAD).size());
excludedNodes.add(entry("127.0.0.3"));
Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants
.DatanodeReportType.DEAD).size());
}
}
| 6,881 | 42.283019 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NetworkTopologyWithNodeGroup;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.test.PathUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestReplicationPolicyWithNodeGroup {
private static final int BLOCK_SIZE = 1024;
private static final int NUM_OF_DATANODES = 8;
private static final int NUM_OF_DATANODES_BOUNDARY = 6;
private static final int NUM_OF_DATANODES_MORE_TARGETS = 12;
private static final int NUM_OF_DATANODES_FOR_DEPENDENCIES = 6;
private final Configuration CONF = new HdfsConfiguration();
private NetworkTopology cluster;
private NameNode namenode;
private BlockPlacementPolicy replicator;
private static final String filename = "/dummyfile.txt";
private static final DatanodeStorageInfo[] storages;
private static final DatanodeDescriptor[] dataNodes;
static {
final String[] racks = {
"/d1/r1/n1",
"/d1/r1/n1",
"/d1/r1/n2",
"/d1/r2/n3",
"/d1/r2/n3",
"/d1/r2/n4",
"/d2/r3/n5",
"/d2/r3/n6"
};
storages = DFSTestUtil.createDatanodeStorageInfos(racks);
dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
}
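  // The topology strings follow the node-group convention used by
  // NetworkTopologyWithNodeGroup: /<data center>/<rack>/<node group>, so the
  // first two nodes share a node group while the third only shares the rack.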
private static final DatanodeStorageInfo[] storagesInBoundaryCase;
private static final DatanodeDescriptor[] dataNodesInBoundaryCase;
static {
final String[] racksInBoundaryCase = {
"/d1/r1/n1",
"/d1/r1/n1",
"/d1/r1/n1",
"/d1/r1/n2",
"/d1/r2/n3",
"/d1/r2/n3"
};
storagesInBoundaryCase = DFSTestUtil.createDatanodeStorageInfos(racksInBoundaryCase);
dataNodesInBoundaryCase = DFSTestUtil.toDatanodeDescriptor(storagesInBoundaryCase);
}
private static final DatanodeStorageInfo[] storagesInMoreTargetsCase;
private final static DatanodeDescriptor[] dataNodesInMoreTargetsCase;
static {
final String[] racksInMoreTargetsCase = {
"/r1/n1",
"/r1/n1",
"/r1/n2",
"/r1/n2",
"/r1/n3",
"/r1/n3",
"/r2/n4",
"/r2/n4",
"/r2/n5",
"/r2/n5",
"/r2/n6",
"/r2/n6"
};
storagesInMoreTargetsCase = DFSTestUtil.createDatanodeStorageInfos(racksInMoreTargetsCase);
dataNodesInMoreTargetsCase = DFSTestUtil.toDatanodeDescriptor(storagesInMoreTargetsCase);
};
private final static DatanodeDescriptor NODE =
new DatanodeDescriptor(DFSTestUtil.getDatanodeDescriptor("9.9.9.9", "/d2/r4/n7"));
private static final DatanodeStorageInfo[] storagesForDependencies;
private static final DatanodeDescriptor[] dataNodesForDependencies;
static {
final String[] racksForDependencies = {
"/d1/r1/n1",
"/d1/r1/n1",
"/d1/r1/n2",
"/d1/r1/n2",
"/d1/r1/n3",
"/d1/r1/n4"
};
final String[] hostNamesForDependencies = {
"h1",
"h2",
"h3",
"h4",
"h5",
"h6"
};
storagesForDependencies = DFSTestUtil.createDatanodeStorageInfos(
racksForDependencies, hostNamesForDependencies);
dataNodesForDependencies = DFSTestUtil.toDatanodeDescriptor(storagesForDependencies);
};
@Before
public void setUp() throws Exception {
FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
// Set properties to make HDFS aware of NodeGroup.
CONF.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
BlockPlacementPolicyWithNodeGroup.class.getName());
CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
NetworkTopologyWithNodeGroup.class.getName());
CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class);
CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
new File(baseDir, "name").getPath());
DFSTestUtil.formatNameNode(CONF);
namenode = new NameNode(CONF);
final BlockManager bm = namenode.getNamesystem().getBlockManager();
replicator = bm.getBlockPlacementPolicy();
cluster = bm.getDatanodeManager().getNetworkTopology();
// construct network topology
for(int i=0; i<NUM_OF_DATANODES; i++) {
cluster.add(dataNodes[i]);
}
setupDataNodeCapacity();
}
@After
public void tearDown() throws Exception {
namenode.stop();
}
private static void updateHeartbeatWithUsage(DatanodeDescriptor dn,
long capacity, long dfsUsed, long remaining, long blockPoolUsed,
long dnCacheCapacity, long dnCacheUsed, int xceiverCount,
int volFailures) {
dn.getStorageInfos()[0].setUtilizationForTesting(
capacity, dfsUsed, remaining, blockPoolUsed);
dn.updateHeartbeat(
BlockManagerTestUtil.getStorageReportsForDatanode(dn),
dnCacheCapacity, dnCacheUsed, xceiverCount, volFailures, null);
}
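  // Give every datanode ample free space (room for 2 * MIN_BLOCKS_FOR_WRITE
  // blocks) so that all of them are eligible targets by default; individual
  // tests override this to make specific nodes ineligible.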
private static void setupDataNodeCapacity() {
for(int i=0; i<NUM_OF_DATANODES; i++) {
updateHeartbeatWithUsage(dataNodes[i],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
}
/**
* Scan the targets list: all targets should be on different NodeGroups.
* Return false if two targets are found on the same NodeGroup.
*/
private static boolean checkTargetsOnDifferentNodeGroup(
DatanodeStorageInfo[] targets) {
if(targets.length == 0)
return true;
Set<String> targetSet = new HashSet<String>();
for(DatanodeStorageInfo storage:targets) {
final DatanodeDescriptor node = storage.getDatanodeDescriptor();
String nodeGroup = NetworkTopology.getLastHalf(node.getNetworkLocation());
if(targetSet.contains(nodeGroup)) {
return false;
} else {
targetSet.add(nodeGroup);
}
}
return true;
}
private boolean isOnSameRack(DatanodeStorageInfo left, DatanodeStorageInfo right) {
return isOnSameRack(left.getDatanodeDescriptor(), right);
}
private boolean isOnSameRack(DatanodeDescriptor left, DatanodeStorageInfo right) {
return cluster.isOnSameRack(left, right.getDatanodeDescriptor());
}
private boolean isOnSameNodeGroup(DatanodeStorageInfo left, DatanodeStorageInfo right) {
return isOnSameNodeGroup(left.getDatanodeDescriptor(), right);
}
private boolean isOnSameNodeGroup(DatanodeDescriptor left, DatanodeStorageInfo right) {
return cluster.isOnSameNodeGroup(left, right.getDatanodeDescriptor());
}
private DatanodeStorageInfo[] chooseTarget(int numOfReplicas) {
return chooseTarget(numOfReplicas, dataNodes[0]);
}
private DatanodeStorageInfo[] chooseTarget(int numOfReplicas,
DatanodeDescriptor writer) {
return chooseTarget(numOfReplicas, writer,
new ArrayList<DatanodeStorageInfo>());
}
private DatanodeStorageInfo[] chooseTarget(int numOfReplicas,
List<DatanodeStorageInfo> chosenNodes) {
return chooseTarget(numOfReplicas, dataNodes[0], chosenNodes);
}
private DatanodeStorageInfo[] chooseTarget(int numOfReplicas,
DatanodeDescriptor writer, List<DatanodeStorageInfo> chosenNodes) {
return chooseTarget(numOfReplicas, writer, chosenNodes, null);
}
private DatanodeStorageInfo[] chooseTarget(
int numOfReplicas,
DatanodeDescriptor writer,
List<DatanodeStorageInfo> chosenNodes,
Set<Node> excludedNodes) {
return replicator.chooseTarget(filename, numOfReplicas, writer, chosenNodes,
false, excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
}
/**
   * In this testcase, the client is dataNodes[0]. So the 1st replica should be
   * placed on dataNodes[0], the 2nd replica should be placed on a
   * different rack, and the 3rd should be placed on a different node (and node
   * group) of the rack chosen for the 2nd replica.
   * The only exception is when <i>numOfReplicas</i> is 2:
   * the 1st is on dataNodes[0] and the 2nd is on a different rack.
* @throws Exception
*/
@Test
public void testChooseTarget1() throws Exception {
updateHeartbeatWithUsage(dataNodes[0],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
0L, 0L, 4, 0); // overloaded
DatanodeStorageInfo[] targets;
targets = chooseTarget(0);
assertEquals(targets.length, 0);
targets = chooseTarget(1);
assertEquals(targets.length, 1);
assertEquals(storages[0], targets[0]);
targets = chooseTarget(2);
assertEquals(targets.length, 2);
assertEquals(storages[0], targets[0]);
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(3);
assertEquals(targets.length, 3);
assertEquals(storages[0], targets[0]);
assertFalse(isOnSameRack(targets[0], targets[1]));
assertTrue(isOnSameRack(targets[1], targets[2]));
assertFalse(isOnSameNodeGroup(targets[1], targets[2]));
targets = chooseTarget(4);
assertEquals(targets.length, 4);
assertEquals(storages[0], targets[0]);
assertTrue(isOnSameRack(targets[1], targets[2]) ||
isOnSameRack(targets[2], targets[3]));
assertFalse(isOnSameRack(targets[0], targets[2]));
    // Make sure no two replicas are on the same node group
verifyNoTwoTargetsOnSameNodeGroup(targets);
updateHeartbeatWithUsage(dataNodes[0],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
private void verifyNoTwoTargetsOnSameNodeGroup(DatanodeStorageInfo[] targets) {
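    // The network locations here include the node group (e.g. /d1/r1/n1),
    // so distinct locations imply distinct node groups.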
Set<String> nodeGroupSet = new HashSet<String>();
for (DatanodeStorageInfo target: targets) {
nodeGroupSet.add(target.getDatanodeDescriptor().getNetworkLocation());
}
assertEquals(nodeGroupSet.size(), targets.length);
}
/**
   * In this testcase, the client is dataNodes[0], but dataNodes[1] is
   * not allowed to be chosen. So the 1st replica should be
   * placed on dataNodes[0], the 2nd replica should be placed on a different
   * rack, the 3rd should be on the same rack as the 2nd replica but in a
   * different node group, and the rest should be placed on a third rack.
* @throws Exception
*/
@Test
public void testChooseTarget2() throws Exception {
DatanodeStorageInfo[] targets;
BlockPlacementPolicyDefault repl = (BlockPlacementPolicyDefault)replicator;
List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
Set<Node> excludedNodes = new HashSet<Node>();
excludedNodes.add(dataNodes[1]);
targets = repl.chooseTarget(filename, 4, dataNodes[0], chosenNodes, false,
excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
assertEquals(targets.length, 4);
assertEquals(storages[0], targets[0]);
assertTrue(cluster.isNodeGroupAware());
    // Make sure no other replica is on the same node group as targets[0]
for (int i=1;i<4;i++) {
assertFalse(isOnSameNodeGroup(targets[0], targets[i]));
}
assertTrue(isOnSameRack(targets[1], targets[2]) ||
isOnSameRack(targets[2], targets[3]));
assertFalse(isOnSameRack(targets[1], targets[3]));
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
chosenNodes.add(storages[2]);
targets = repl.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true,
excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
System.out.println("targets=" + Arrays.asList(targets));
assertEquals(2, targets.length);
    // make sure that the chosen node is among the targets.
int i = 0;
for(; i < targets.length && !storages[2].equals(targets[i]); i++);
assertTrue(i < targets.length);
}
/**
   * In this testcase, the client is dataNodes[0], but dataNodes[0] is not
   * qualified to be chosen. So the 1st replica should be placed on dataNodes[1],
   * the 2nd replica should be placed on a different rack,
   * the 3rd replica should be placed on the same rack as the 2nd replica but in
   * a different node group, and the rest should be placed on the third rack.
* @throws Exception
*/
@Test
public void testChooseTarget3() throws Exception {
    // make data node 0 not qualified to be chosen
updateHeartbeatWithUsage(dataNodes[0],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
(HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
0L, 0L, 0, 0); // no space
DatanodeStorageInfo[] targets;
targets = chooseTarget(0);
assertEquals(targets.length, 0);
targets = chooseTarget(1);
assertEquals(targets.length, 1);
assertEquals(storages[1], targets[0]);
targets = chooseTarget(2);
assertEquals(targets.length, 2);
assertEquals(storages[1], targets[0]);
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(3);
assertEquals(targets.length, 3);
assertEquals(storages[1], targets[0]);
assertTrue(isOnSameRack(targets[1], targets[2]));
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(4);
assertEquals(targets.length, 4);
assertEquals(storages[1], targets[0]);
assertTrue(cluster.isNodeGroupAware());
verifyNoTwoTargetsOnSameNodeGroup(targets);
assertTrue(isOnSameRack(targets[1], targets[2]) ||
isOnSameRack(targets[2], targets[3]));
updateHeartbeatWithUsage(dataNodes[0],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
/**
   * In this testcase, the client is dataNodes[0], but none of the nodes on
   * rack 1 is qualified to be chosen. So the 1st replica should be placed on
   * either rack 2 or rack 3,
   * the 2nd replica should be placed on a different rack,
   * and the 3rd replica should be placed on the same rack as the 1st replica,
   * but in a different node group.
* @throws Exception
*/
@Test
public void testChooseTarget4() throws Exception {
    // make data nodes 0-2 not qualified to be chosen: not enough disk space
for(int i=0; i<3; i++) {
updateHeartbeatWithUsage(dataNodes[i],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
(HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
DatanodeStorageInfo[] targets;
targets = chooseTarget(0);
assertEquals(targets.length, 0);
targets = chooseTarget(1);
assertEquals(targets.length, 1);
assertFalse(isOnSameRack(dataNodes[0], targets[0]));
targets = chooseTarget(2);
assertEquals(targets.length, 2);
assertFalse(isOnSameRack(dataNodes[0], targets[0]));
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(3);
assertEquals(targets.length, 3);
for(int i=0; i<3; i++) {
assertFalse(isOnSameRack(dataNodes[0], targets[i]));
}
verifyNoTwoTargetsOnSameNodeGroup(targets);
assertTrue(isOnSameRack(targets[0], targets[1]) ||
isOnSameRack(targets[1], targets[2]));
assertFalse(isOnSameRack(targets[0], targets[2]));
}
/**
   * In this testcase, the client is a node outside of the file system.
   * So the 1st replica can be placed on any node,
   * the 2nd replica should be placed on a different rack,
   * and the 3rd replica should be placed on the same rack as the 2nd replica.
* @throws Exception
*/
@Test
public void testChooseTarget5() throws Exception {
setupDataNodeCapacity();
DatanodeStorageInfo[] targets;
targets = chooseTarget(0, NODE);
assertEquals(targets.length, 0);
targets = chooseTarget(1, NODE);
assertEquals(targets.length, 1);
targets = chooseTarget(2, NODE);
assertEquals(targets.length, 2);
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(3, NODE);
assertEquals(targets.length, 3);
assertTrue(isOnSameRack(targets[1], targets[2]));
assertFalse(isOnSameRack(targets[0], targets[1]));
verifyNoTwoTargetsOnSameNodeGroup(targets);
}
/**
   * This testcase tests re-replication, when dataNodes[0] is already chosen.
   * So the 1st replica can be placed on a random rack,
   * the 2nd replica should be placed on a different node and node group but on
   * the same rack as the 1st replica, and the 3rd replica can be placed randomly.
* @throws Exception
*/
@Test
public void testRereplicate1() throws Exception {
setupDataNodeCapacity();
List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
chosenNodes.add(storages[0]);
DatanodeStorageInfo[] targets;
targets = chooseTarget(0, chosenNodes);
assertEquals(targets.length, 0);
targets = chooseTarget(1, chosenNodes);
assertEquals(targets.length, 1);
assertFalse(isOnSameRack(dataNodes[0], targets[0]));
targets = chooseTarget(2, chosenNodes);
assertEquals(targets.length, 2);
assertTrue(isOnSameRack(dataNodes[0], targets[0]));
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(3, chosenNodes);
assertEquals(targets.length, 3);
assertTrue(isOnSameRack(dataNodes[0], targets[0]));
assertFalse(isOnSameNodeGroup(dataNodes[0], targets[0]));
assertFalse(isOnSameRack(targets[0], targets[2]));
}
/**
   * This testcase tests re-replication,
   * when dataNodes[0] and dataNodes[1] are already chosen.
   * So the 1st replica should be placed on a rack other than rack 1,
   * and the remaining replicas can be placed randomly.
* @throws Exception
*/
@Test
public void testRereplicate2() throws Exception {
setupDataNodeCapacity();
List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
chosenNodes.add(storages[0]);
chosenNodes.add(storages[1]);
DatanodeStorageInfo[] targets;
targets = chooseTarget(0, chosenNodes);
assertEquals(targets.length, 0);
targets = chooseTarget(1, chosenNodes);
assertEquals(targets.length, 1);
assertFalse(isOnSameRack(dataNodes[0], targets[0]));
targets = chooseTarget(2, chosenNodes);
assertEquals(targets.length, 2);
assertFalse(isOnSameRack(dataNodes[0], targets[0]) &&
isOnSameRack(dataNodes[0], targets[1]));
}
/**
   * This testcase tests re-replication,
   * when dataNodes[0] and dataNodes[3] are already chosen.
   * So the 1st replica should be placed on the rack where the writer resides,
   * and the remaining replicas can be placed randomly.
* @throws Exception
*/
@Test
public void testRereplicate3() throws Exception {
setupDataNodeCapacity();
List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
chosenNodes.add(storages[0]);
chosenNodes.add(storages[3]);
DatanodeStorageInfo[] targets;
targets = chooseTarget(0, chosenNodes);
assertEquals(targets.length, 0);
targets = chooseTarget(1, chosenNodes);
assertEquals(targets.length, 1);
assertTrue(isOnSameRack(dataNodes[0], targets[0]));
assertFalse(isOnSameRack(dataNodes[3], targets[0]));
targets = chooseTarget(1, dataNodes[3], chosenNodes);
assertEquals(targets.length, 1);
assertTrue(isOnSameRack(dataNodes[3], targets[0]));
assertFalse(isOnSameNodeGroup(dataNodes[3], targets[0]));
assertFalse(isOnSameRack(dataNodes[0], targets[0]));
targets = chooseTarget(2, chosenNodes);
assertEquals(targets.length, 2);
assertTrue(isOnSameRack(dataNodes[0], targets[0]));
assertFalse(isOnSameNodeGroup(dataNodes[0], targets[0]));
targets = chooseTarget(2, dataNodes[3], chosenNodes);
assertEquals(targets.length, 2);
assertTrue(isOnSameRack(dataNodes[3], targets[0]));
}
  /**
   * Test that chooseReplicaToDelete picks replicas based on
   * block locality and free space. A simplified sketch of the
   * "least free space" heuristic follows this test.
   */
@Test
public void testChooseReplicaToDelete() throws Exception {
List<DatanodeStorageInfo> replicaList = new ArrayList<DatanodeStorageInfo>();
final Map<String, List<DatanodeStorageInfo>> rackMap
= new HashMap<String, List<DatanodeStorageInfo>>();
dataNodes[0].setRemaining(4*1024*1024);
replicaList.add(storages[0]);
dataNodes[1].setRemaining(3*1024*1024);
replicaList.add(storages[1]);
dataNodes[2].setRemaining(2*1024*1024);
replicaList.add(storages[2]);
dataNodes[5].setRemaining(1*1024*1024);
replicaList.add(storages[5]);
List<DatanodeStorageInfo> first = new ArrayList<DatanodeStorageInfo>();
List<DatanodeStorageInfo> second = new ArrayList<DatanodeStorageInfo>();
replicator.splitNodesWithRack(
replicaList, rackMap, first, second);
assertEquals(3, first.size());
assertEquals(1, second.size());
List<StorageType> excessTypes = new ArrayList<StorageType>();
excessTypes.add(StorageType.DEFAULT);
DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete(
null, null, (short)3, first, second, excessTypes);
    // Within the first set {dataNodes[0], dataNodes[1], dataNodes[2]},
    // dataNodes[0] and dataNodes[1] are in the same node group,
    // but dataNodes[1] is chosen because it has less free space
assertEquals(chosen, storages[1]);
replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
assertEquals(2, first.size());
assertEquals(1, second.size());
    // Within the first set {dataNodes[0], dataNodes[2]}, dataNodes[2] is
    // chosen because it has less free space
excessTypes.add(StorageType.DEFAULT);
chosen = replicator.chooseReplicaToDelete(
null, null, (short)2, first, second, excessTypes);
assertEquals(chosen, storages[2]);
replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
assertEquals(0, first.size());
assertEquals(2, second.size());
    // Within the second set, dataNodes[5] is chosen because it has the least free space
excessTypes.add(StorageType.DEFAULT);
chosen = replicator.chooseReplicaToDelete(
null, null, (short)1, first, second, excessTypes);
assertEquals(chosen, storages[5]);
}
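  // Illustrative sketch (not part of the original test): the "less free space"
  // heuristic verified above amounts to a min-by-remaining selection over the
  // candidate replicas. A minimal, self-contained version of the idea, using
  // plain byte counts instead of DatanodeStorageInfo, could look like this:
  private static String pickReplicaWithLeastFreeSpace(
      Map<String, Long> remainingBytesByReplica) {
    String chosen = null;
    long minRemaining = Long.MAX_VALUE;
    for (Map.Entry<String, Long> e : remainingBytesByReplica.entrySet()) {
      if (e.getValue() < minRemaining) {
        minRemaining = e.getValue();
        chosen = e.getKey();
      }
    }
    // e.g. for {dn0=4MB, dn1=3MB, dn2=2MB} this returns "dn2"
    return chosen;
  }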
  /**
   * Test the replica placement policy in case of a boundary topology.
   * Rack 2 has only one node group and therefore cannot hold two replicas.
   * The 1st replica will be placed on the writer.
   * The 2nd replica should be placed on a different rack.
   * The 3rd replica should be placed on the same rack as the writer, but in a
   * different node group. (A node-group distinctness sketch follows this test.)
   */
@Test
public void testChooseTargetsOnBoundaryTopology() throws Exception {
for(int i=0; i<NUM_OF_DATANODES; i++) {
cluster.remove(dataNodes[i]);
}
for(int i=0; i<NUM_OF_DATANODES_BOUNDARY; i++) {
cluster.add(dataNodesInBoundaryCase[i]);
}
for(int i=0; i<NUM_OF_DATANODES_BOUNDARY; i++) {
updateHeartbeatWithUsage(dataNodes[0],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
(HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE,
0L, 0L, 0L, 0, 0);
updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
DatanodeStorageInfo[] targets;
targets = chooseTarget(0, dataNodesInBoundaryCase[0]);
assertEquals(targets.length, 0);
targets = chooseTarget(1, dataNodesInBoundaryCase[0]);
assertEquals(targets.length, 1);
targets = chooseTarget(2, dataNodesInBoundaryCase[0]);
assertEquals(targets.length, 2);
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(3, dataNodesInBoundaryCase[0]);
assertEquals(targets.length, 3);
assertTrue(checkTargetsOnDifferentNodeGroup(targets));
}
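  // Illustrative sketch (not part of the original test): the "no two replicas
  // in the same node group" property checked by checkTargetsOnDifferentNodeGroup
  // above boils down to the node-group names of the chosen targets being
  // pairwise distinct, roughly:
  private static boolean allNodeGroupsDistinct(List<String> nodeGroupNames) {
    // a HashSet drops duplicates, so equal sizes mean no duplicates existed
    return new HashSet<String>(nodeGroupNames).size() == nodeGroupNames.size();
  }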
  /**
   * Test the re-replication policy in the boundary case.
   * Rack 2 has only one node group, and the node in that node group is chosen.
   * Rack 1 has two node groups, and one of them is chosen.
   * The replica policy should choose a node from a node group of rack 1, but
   * not from the same node group as the already chosen nodes.
   */
@Test
public void testRereplicateOnBoundaryTopology() throws Exception {
for(int i=0; i<NUM_OF_DATANODES_BOUNDARY; i++) {
updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
chosenNodes.add(storagesInBoundaryCase[0]);
chosenNodes.add(storagesInBoundaryCase[5]);
DatanodeStorageInfo[] targets;
targets = chooseTarget(1, dataNodesInBoundaryCase[0], chosenNodes);
assertFalse(isOnSameNodeGroup(dataNodesInBoundaryCase[0], targets[0]));
assertFalse(isOnSameNodeGroup(dataNodesInBoundaryCase[5], targets[0]));
assertTrue(checkTargetsOnDifferentNodeGroup(targets));
}
  /**
   * Test the replica placement policy when more targets are requested than
   * there are node groups.
   * The 12-node cluster has only 6 node groups, but in some cases, such as
   * placing a submitted job file, there is a requirement to choose more (10)
   * targets for placing replicas. We should verify that it returns 6 targets.
   * (A simplified capping sketch follows this test.)
   */
@Test
public void testChooseMoreTargetsThanNodeGroups() throws Exception {
for(int i=0; i<NUM_OF_DATANODES; i++) {
cluster.remove(dataNodes[i]);
}
for(int i=0; i<NUM_OF_DATANODES_BOUNDARY; i++) {
DatanodeDescriptor node = dataNodesInBoundaryCase[i];
if (cluster.contains(node)) {
cluster.remove(node);
}
}
for(int i=0; i<NUM_OF_DATANODES_MORE_TARGETS; i++) {
cluster.add(dataNodesInMoreTargetsCase[i]);
}
for(int i=0; i<NUM_OF_DATANODES_MORE_TARGETS; i++) {
updateHeartbeatWithUsage(dataNodesInMoreTargetsCase[i],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
DatanodeStorageInfo[] targets;
// Test normal case -- 3 replicas
targets = chooseTarget(3, dataNodesInMoreTargetsCase[0]);
assertEquals(targets.length, 3);
assertTrue(checkTargetsOnDifferentNodeGroup(targets));
// Test special case -- replica number over node groups.
targets = chooseTarget(10, dataNodesInMoreTargetsCase[0]);
assertTrue(checkTargetsOnDifferentNodeGroup(targets));
    // Verify that it can only find 6 targets for placing replicas.
assertEquals(targets.length, 6);
}
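  // Sketch (simplifying assumption, for illustration only): with at most one
  // replica per node group, the number of targets the policy can return is
  // capped by the number of node groups, which is why asking for 10 targets
  // above yields only 6.
  private static int maxPlaceableTargets(int requestedReplicas, int numNodeGroups) {
    return Math.min(requestedReplicas, numNodeGroups);
  }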
@Test
public void testChooseTargetWithDependencies() throws Exception {
for(int i=0; i<NUM_OF_DATANODES; i++) {
cluster.remove(dataNodes[i]);
}
for(int i=0; i<NUM_OF_DATANODES_MORE_TARGETS; i++) {
DatanodeDescriptor node = dataNodesInMoreTargetsCase[i];
if (cluster.contains(node)) {
cluster.remove(node);
}
}
Host2NodesMap host2DatanodeMap = namenode.getNamesystem()
.getBlockManager()
.getDatanodeManager().getHost2DatanodeMap();
for(int i=0; i<NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
cluster.add(dataNodesForDependencies[i]);
host2DatanodeMap.add(dataNodesForDependencies[i]);
}
//add dependencies (node1 <-> node2, and node3<->node4)
dataNodesForDependencies[1].addDependentHostName(
dataNodesForDependencies[2].getHostName());
dataNodesForDependencies[2].addDependentHostName(
dataNodesForDependencies[1].getHostName());
dataNodesForDependencies[3].addDependentHostName(
dataNodesForDependencies[4].getHostName());
dataNodesForDependencies[4].addDependentHostName(
dataNodesForDependencies[3].getHostName());
//Update heartbeat
for(int i=0; i<NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
updateHeartbeatWithUsage(dataNodesForDependencies[i],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
DatanodeStorageInfo[] targets;
Set<Node> excludedNodes = new HashSet<Node>();
excludedNodes.add(dataNodesForDependencies[5]);
//try to select three targets as there are three node groups
targets = chooseTarget(3, dataNodesForDependencies[1], chosenNodes, excludedNodes);
    // Even though there are three node groups, verify that only two targets
    // are selected due to dependencies (a simplified sketch of this exclusion
    // step follows the test)
assertEquals(targets.length, 2);
assertEquals(targets[0], storagesForDependencies[1]);
assertTrue(targets[1].equals(storagesForDependencies[3]) || targets[1].equals(storagesForDependencies[4]));
//verify that all data nodes are in the excluded list
assertEquals(excludedNodes.size(), NUM_OF_DATANODES_FOR_DEPENDENCIES);
for(int i=0; i<NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
assertTrue(excludedNodes.contains(dataNodesForDependencies[i]));
}
}
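  // Illustrative sketch (not part of the original test): when a node is
  // chosen, the policy also excludes every host recorded as its dependency,
  // which is why all NUM_OF_DATANODES_FOR_DEPENDENCIES nodes end up in
  // excludedNodes above. A simplified, string-based version of that exclusion
  // step could look like this:
  private static Set<String> excludeChosenAndDependencies(String chosenHost,
      Map<String, List<String>> dependentHostsByHost) {
    Set<String> excluded = new HashSet<String>();
    excluded.add(chosenHost);
    List<String> dependents = dependentHostsByHost.get(chosenHost);
    if (dependents != null) {
      excluded.addAll(dependents);
    }
    return excluded;
  }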
}
| 30,788 | 36.777914 | 111 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason;
import org.junit.Test;
/**
* This test makes sure that
* CorruptReplicasMap::numBlocksWithCorruptReplicas and
* CorruptReplicasMap::getCorruptReplicaBlockIds
* return the correct values
*/
public class TestCorruptReplicaInfo {
private static final Log LOG =
LogFactory.getLog(TestCorruptReplicaInfo.class);
private final Map<Long, Block> block_map =
new HashMap<Long, Block>();
  // Allow easy block creation by block id.
  // Return the existing block if one with the same block id already exists.
  // (An alternative memoization sketch follows the two getBlock overloads.)
private Block getBlock(Long block_id) {
if (!block_map.containsKey(block_id)) {
block_map.put(block_id, new Block(block_id,0,0));
}
return block_map.get(block_id);
}
private Block getBlock(int block_id) {
return getBlock((long)block_id);
}
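  // Sketch (illustration only, not used by the test; assumes the build
  // targets Java 8+): the same "create the block on first use" memoization
  // can be written with Map.computeIfAbsent:
  private Block getBlockViaComputeIfAbsent(long blockId) {
    return block_map.computeIfAbsent(blockId, id -> new Block(id, 0, 0));
  }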
@Test
public void testCorruptReplicaInfo() throws IOException,
InterruptedException {
CorruptReplicasMap crm = new CorruptReplicasMap();
// Make sure initial values are returned correctly
assertEquals("Number of corrupt blocks must initially be 0", 0, crm.size());
assertNull("Param n cannot be less than 0", crm.getCorruptReplicaBlockIdsForTesting(-1, null));
assertNull("Param n cannot be greater than 100", crm.getCorruptReplicaBlockIdsForTesting(101, null));
long[] l = crm.getCorruptReplicaBlockIdsForTesting(0, null);
assertNotNull("n = 0 must return non-null", l);
assertEquals("n = 0 must return an empty list", 0, l.length);
// create a list of block_ids. A list is used to allow easy validation of the
// output of getCorruptReplicaBlockIds
int NUM_BLOCK_IDS = 140;
List<Long> block_ids = new LinkedList<Long>();
for (int i=0;i<NUM_BLOCK_IDS;i++) {
block_ids.add((long)i);
}
DatanodeDescriptor dn1 = DFSTestUtil.getLocalDatanodeDescriptor();
DatanodeDescriptor dn2 = DFSTestUtil.getLocalDatanodeDescriptor();
addToCorruptReplicasMap(crm, getBlock(0), dn1);
assertEquals("Number of corrupt blocks not returning correctly",
1, crm.size());
addToCorruptReplicasMap(crm, getBlock(1), dn1);
assertEquals("Number of corrupt blocks not returning correctly",
2, crm.size());
addToCorruptReplicasMap(crm, getBlock(1), dn2);
assertEquals("Number of corrupt blocks not returning correctly",
2, crm.size());
crm.removeFromCorruptReplicasMap(getBlock(1));
assertEquals("Number of corrupt blocks not returning correctly",
1, crm.size());
crm.removeFromCorruptReplicasMap(getBlock(0));
assertEquals("Number of corrupt blocks not returning correctly",
0, crm.size());
for (Long block_id: block_ids) {
addToCorruptReplicasMap(crm, getBlock(block_id), dn1);
}
assertEquals("Number of corrupt blocks not returning correctly",
NUM_BLOCK_IDS, crm.size());
assertTrue("First five block ids not returned correctly ",
Arrays.equals(new long[]{0,1,2,3,4},
crm.getCorruptReplicaBlockIdsForTesting(5, null)));
LOG.info(crm.getCorruptReplicaBlockIdsForTesting(10, 7L));
LOG.info(block_ids.subList(7, 18));
assertTrue("10 blocks after 7 not returned correctly ",
Arrays.equals(new long[]{8,9,10,11,12,13,14,15,16,17},
crm.getCorruptReplicaBlockIdsForTesting(10, 7L)));
}
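  // Sketch (assumed semantics, for illustration only): the paging behaviour
  // asserted above -- getCorruptReplicaBlockIdsForTesting(10, 7L) returning
  // ids 8..17 -- corresponds to "return up to n block ids strictly after
  // startingBlockId" over the sorted corrupt block ids:
  private static long[] idsAfter(List<Long> sortedIds, int n, long startingBlockId) {
    List<Long> picked = new LinkedList<Long>();
    for (Long id : sortedIds) {
      if (id > startingBlockId && picked.size() < n) {
        picked.add(id);
      }
    }
    long[] out = new long[picked.size()];
    int i = 0;
    for (Long id : picked) {
      out[i++] = id;
    }
    return out;
  }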
private static void addToCorruptReplicasMap(CorruptReplicasMap crm,
Block blk, DatanodeDescriptor dn) {
crm.addToCorruptReplicasMap(blk, dn, "TEST", Reason.NONE);
}
}
| 5,298 | 37.678832 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.junit.Test;
/**
 * This class tests the methods in DatanodeDescriptor.
 */
public class TestDatanodeDescriptor {
  /**
   * Test that getInvalidateBlocks observes the max limit.
   * (A sketch of the observed drain-up-to-limit behaviour follows this test.)
   */
@Test
public void testGetInvalidateBlocks() throws Exception {
final int MAX_BLOCKS = 10;
final int REMAINING_BLOCKS = 2;
final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
for (int i=0; i<MAX_BLOCKS; i++) {
blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
}
dd.addBlocksToBeInvalidated(blockList);
Block[] bc = dd.getInvalidateBlocks(MAX_LIMIT);
assertEquals(bc.length, MAX_LIMIT);
bc = dd.getInvalidateBlocks(MAX_LIMIT);
assertEquals(bc.length, REMAINING_BLOCKS);
}
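  // Illustrative sketch (an assumption about the observed behaviour, not the
  // actual DatanodeDescriptor internals): getInvalidateBlocks(maxLimit) acts
  // like draining at most maxLimit entries from a pending list, so 10 queued
  // blocks with a limit of 8 yield batches of 8 and then 2, as asserted above.
  private static <T> ArrayList<T> drainUpTo(ArrayList<T> pending, int maxLimit) {
    ArrayList<T> batch = new ArrayList<T>();
    while (!pending.isEmpty() && batch.size() < maxLimit) {
      batch.add(pending.remove(0));
    }
    return batch;
  }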
@Test
public void testBlocksCounter() throws Exception {
DatanodeDescriptor dd = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
assertEquals(0, dd.numBlocks());
BlockInfo blk = new BlockInfoContiguous(new Block(1L), (short) 1);
BlockInfo blk1 = new BlockInfoContiguous(new Block(2L), (short) 2);
DatanodeStorageInfo[] storages = dd.getStorageInfos();
assertTrue(storages.length > 0);
// add first block
assertTrue(storages[0].addBlock(blk) == AddBlockResult.ADDED);
assertEquals(1, dd.numBlocks());
// remove a non-existent block
assertFalse(dd.removeBlock(blk1));
assertEquals(1, dd.numBlocks());
    // add an already existing block
assertFalse(storages[0].addBlock(blk) == AddBlockResult.ADDED);
assertEquals(1, dd.numBlocks());
// add second block
assertTrue(storages[0].addBlock(blk1) == AddBlockResult.ADDED);
assertEquals(2, dd.numBlocks());
// remove first block
assertTrue(dd.removeBlock(blk));
assertEquals(1, dd.numBlocks());
// remove second block
assertTrue(dd.removeBlock(blk1));
assertEquals(0, dd.numBlocks());
}
}
| 3,279 | 37.588235 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.EnumSet;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.BlockReaderFactory;
import org.apache.hadoop.hdfs.ClientContext;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.RemotePeerFactory;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.net.TcpPeerServer;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil;
import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
public class TestBlockTokenWithDFS {
private static final int BLOCK_SIZE = 1024;
private static final int FILE_SIZE = 2 * BLOCK_SIZE;
private static final String FILE_TO_READ = "/fileToRead.dat";
private static final String FILE_TO_WRITE = "/fileToWrite.dat";
private static final String FILE_TO_APPEND = "/fileToAppend.dat";
private final byte[] rawData = new byte[FILE_SIZE];
{
((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
Random r = new Random();
r.nextBytes(rawData);
}
private void createFile(FileSystem fs, Path filename) throws IOException {
FSDataOutputStream out = fs.create(filename);
out.write(rawData);
out.close();
}
// read a file using blockSeekTo()
private boolean checkFile1(FSDataInputStream in) {
byte[] toRead = new byte[FILE_SIZE];
int totalRead = 0;
int nRead = 0;
try {
while ((nRead = in.read(toRead, totalRead, toRead.length - totalRead)) > 0) {
totalRead += nRead;
}
} catch (IOException e) {
return false;
}
assertEquals("Cannot read file.", toRead.length, totalRead);
return checkFile(toRead);
}
// read a file using fetchBlockByteRange()
private boolean checkFile2(FSDataInputStream in) {
byte[] toRead = new byte[FILE_SIZE];
try {
assertEquals("Cannot read file", toRead.length, in.read(0, toRead, 0,
toRead.length));
} catch (IOException e) {
return false;
}
return checkFile(toRead);
}
private boolean checkFile(byte[] fileToCheck) {
if (fileToCheck.length != rawData.length) {
return false;
}
for (int i = 0; i < fileToCheck.length; i++) {
if (fileToCheck[i] != rawData[i]) {
return false;
}
}
return true;
}
// creates a file and returns a descriptor for writing to it
private static FSDataOutputStream writeFile(FileSystem fileSys, Path name,
short repl, long blockSize) throws IOException {
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl, blockSize);
return stm;
}
// try reading a block using a BlockReader directly
private static void tryRead(final Configuration conf, LocatedBlock lblock,
boolean shouldSucceed) {
InetSocketAddress targetAddr = null;
IOException ioe = null;
BlockReader blockReader = null;
ExtendedBlock block = lblock.getBlock();
try {
DatanodeInfo[] nodes = lblock.getLocations();
targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
blockReader = new BlockReaderFactory(new DfsClientConf(conf)).
setFileName(BlockReaderFactory.getFileName(targetAddr,
"test-blockpoolid", block.getBlockId())).
setBlock(block).
setBlockToken(lblock.getBlockToken()).
setInetSocketAddress(targetAddr).
setStartOffset(0).
setLength(-1).
setVerifyChecksum(true).
setClientName("TestBlockTokenWithDFS").
setDatanodeInfo(nodes[0]).
setCachingStrategy(CachingStrategy.newDefaultStrategy()).
setClientCacheContext(ClientContext.getFromConf(conf)).
setConfiguration(conf).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr,
Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
throws IOException {
Peer peer = null;
Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
try {
sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
peer = TcpPeerServer.peerFromSocket(sock);
} finally {
if (peer == null) {
IOUtils.closeSocket(sock);
}
}
return peer;
}
}).
build();
} catch (IOException ex) {
ioe = ex;
} finally {
if (blockReader != null) {
try {
blockReader.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
if (shouldSucceed) {
Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
+ "when it is expected to be valid", blockReader);
} else {
Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
+ "when it is expected to be invalid", ioe);
Assert.assertTrue(
"OP_READ_BLOCK failed due to reasons other than access token: ",
ioe instanceof InvalidBlockTokenException);
}
}
// get a conf for testing
private static Configuration getConf(int numDataNodes) {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setInt("io.bytes.per.checksum", BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
conf.setInt("ipc.client.connect.max.retries", 0);
// Set short retry timeouts so this test runs faster
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
return conf;
}
  /**
   * Tests that the APPEND operation can handle token expiration when
   * re-establishing the pipeline is needed.
   * (A sketch of the shared expiry-wait loop follows this test.)
   */
@Test
public void testAppend() throws Exception {
MiniDFSCluster cluster = null;
int numDataNodes = 2;
Configuration conf = getConf(numDataNodes);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
assertEquals(numDataNodes, cluster.getDataNodes().size());
final NameNode nn = cluster.getNameNode();
final BlockManager bm = nn.getNamesystem().getBlockManager();
final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
// set a short token lifetime (1 second)
SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
Path fileToAppend = new Path(FILE_TO_APPEND);
FileSystem fs = cluster.getFileSystem();
// write a one-byte file
FSDataOutputStream stm = writeFile(fs, fileToAppend,
(short) numDataNodes, BLOCK_SIZE);
stm.write(rawData, 0, 1);
stm.close();
// open the file again for append
stm = fs.append(fileToAppend);
int mid = rawData.length - 1;
stm.write(rawData, 1, mid - 1);
stm.hflush();
/*
* wait till token used in stm expires
*/
Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
while (!SecurityTestUtil.isBlockTokenExpired(token)) {
try {
Thread.sleep(10);
} catch (InterruptedException ignored) {
}
}
// remove a datanode to force re-establishing pipeline
cluster.stopDataNode(0);
// append the rest of the file
stm.write(rawData, mid, rawData.length - mid);
stm.close();
// check if append is successful
FSDataInputStream in5 = fs.open(fileToAppend);
assertTrue(checkFile1(in5));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
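  // Sketch (not part of the original test): the expiry-wait loop above is
  // repeated by several tests in this class and could be factored into a
  // small helper along these lines (shown for illustration only):
  private static void waitUntilBlockTokenExpires(Token<BlockTokenIdentifier> token)
      throws Exception {
    while (!SecurityTestUtil.isBlockTokenExpired(token)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }
  }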
  /**
   * Tests that the WRITE operation can handle token expiration when
   * re-establishing the pipeline is needed.
   */
@Test
public void testWrite() throws Exception {
MiniDFSCluster cluster = null;
int numDataNodes = 2;
Configuration conf = getConf(numDataNodes);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
assertEquals(numDataNodes, cluster.getDataNodes().size());
final NameNode nn = cluster.getNameNode();
final BlockManager bm = nn.getNamesystem().getBlockManager();
final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
// set a short token lifetime (1 second)
SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
Path fileToWrite = new Path(FILE_TO_WRITE);
FileSystem fs = cluster.getFileSystem();
FSDataOutputStream stm = writeFile(fs, fileToWrite, (short) numDataNodes,
BLOCK_SIZE);
// write a partial block
int mid = rawData.length - 1;
stm.write(rawData, 0, mid);
stm.hflush();
/*
* wait till token used in stm expires
*/
Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
while (!SecurityTestUtil.isBlockTokenExpired(token)) {
try {
Thread.sleep(10);
} catch (InterruptedException ignored) {
}
}
// remove a datanode to force re-establishing pipeline
cluster.stopDataNode(0);
// write the rest of the file
stm.write(rawData, mid, rawData.length - mid);
stm.close();
// check if write is successful
FSDataInputStream in4 = fs.open(fileToWrite);
assertTrue(checkFile1(in4));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testRead() throws Exception {
MiniDFSCluster cluster = null;
int numDataNodes = 2;
Configuration conf = getConf(numDataNodes);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
assertEquals(numDataNodes, cluster.getDataNodes().size());
final NameNode nn = cluster.getNameNode();
final NamenodeProtocols nnProto = nn.getRpcServer();
final BlockManager bm = nn.getNamesystem().getBlockManager();
final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
// set a short token lifetime (1 second) initially
SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
Path fileToRead = new Path(FILE_TO_READ);
FileSystem fs = cluster.getFileSystem();
createFile(fs, fileToRead);
/*
* setup for testing expiration handling of cached tokens
*/
// read using blockSeekTo(). Acquired tokens are cached in in1
FSDataInputStream in1 = fs.open(fileToRead);
assertTrue(checkFile1(in1));
// read using blockSeekTo(). Acquired tokens are cached in in2
FSDataInputStream in2 = fs.open(fileToRead);
assertTrue(checkFile1(in2));
// read using fetchBlockByteRange(). Acquired tokens are cached in in3
FSDataInputStream in3 = fs.open(fileToRead);
assertTrue(checkFile2(in3));
/*
* testing READ interface on DN using a BlockReader
*/
DFSClient client = null;
try {
client = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()), conf);
} finally {
if (client != null) client.close();
}
List<LocatedBlock> locatedBlocks = nnProto.getBlockLocations(
FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
LocatedBlock lblock = locatedBlocks.get(0); // first block
Token<BlockTokenIdentifier> myToken = lblock.getBlockToken();
// verify token is not expired
assertFalse(SecurityTestUtil.isBlockTokenExpired(myToken));
// read with valid token, should succeed
tryRead(conf, lblock, true);
/*
* wait till myToken and all cached tokens in in1, in2 and in3 expire
*/
while (!SecurityTestUtil.isBlockTokenExpired(myToken)) {
try {
Thread.sleep(10);
} catch (InterruptedException ignored) {
}
}
/*
* continue testing READ interface on DN using a BlockReader
*/
// verify token is expired
assertTrue(SecurityTestUtil.isBlockTokenExpired(myToken));
// read should fail
tryRead(conf, lblock, false);
// use a valid new token
lblock.setBlockToken(sm.generateToken(lblock.getBlock(),
EnumSet.of(BlockTokenIdentifier.AccessMode.READ)));
// read should succeed
tryRead(conf, lblock, true);
// use a token with wrong blockID
ExtendedBlock wrongBlock = new ExtendedBlock(lblock.getBlock()
.getBlockPoolId(), lblock.getBlock().getBlockId() + 1);
lblock.setBlockToken(sm.generateToken(wrongBlock,
EnumSet.of(BlockTokenIdentifier.AccessMode.READ)));
// read should fail
tryRead(conf, lblock, false);
// use a token with wrong access modes
lblock.setBlockToken(sm.generateToken(lblock.getBlock(),
EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE,
BlockTokenIdentifier.AccessMode.COPY,
BlockTokenIdentifier.AccessMode.REPLACE)));
// read should fail
tryRead(conf, lblock, false);
// set a long token lifetime for future tokens
SecurityTestUtil.setBlockTokenLifetime(sm, 600 * 1000L);
/*
* testing that when cached tokens are expired, DFSClient will re-fetch
* tokens transparently for READ.
*/
// confirm all tokens cached in in1 are expired by now
List<LocatedBlock> lblocks = DFSTestUtil.getAllBlocks(in1);
for (LocatedBlock blk : lblocks) {
assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
}
// verify blockSeekTo() is able to re-fetch token transparently
in1.seek(0);
assertTrue(checkFile1(in1));
// confirm all tokens cached in in2 are expired by now
List<LocatedBlock> lblocks2 = DFSTestUtil.getAllBlocks(in2);
for (LocatedBlock blk : lblocks2) {
assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
}
// verify blockSeekTo() is able to re-fetch token transparently (testing
// via another interface method)
assertTrue(in2.seekToNewSource(0));
assertTrue(checkFile1(in2));
// confirm all tokens cached in in3 are expired by now
List<LocatedBlock> lblocks3 = DFSTestUtil.getAllBlocks(in3);
for (LocatedBlock blk : lblocks3) {
assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
}
// verify fetchBlockByteRange() is able to re-fetch token transparently
assertTrue(checkFile2(in3));
/*
* testing that after datanodes are restarted on the same ports, cached
* tokens should still work and there is no need to fetch new tokens from
* namenode. This test should run while namenode is down (to make sure no
* new tokens can be fetched from namenode).
*/
// restart datanodes on the same ports that they currently use
assertTrue(cluster.restartDataNodes(true));
cluster.waitActive();
assertEquals(numDataNodes, cluster.getDataNodes().size());
cluster.shutdownNameNode(0);
// confirm tokens cached in in1 are still valid
lblocks = DFSTestUtil.getAllBlocks(in1);
for (LocatedBlock blk : lblocks) {
assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
}
// verify blockSeekTo() still works (forced to use cached tokens)
in1.seek(0);
assertTrue(checkFile1(in1));
// confirm tokens cached in in2 are still valid
lblocks2 = DFSTestUtil.getAllBlocks(in2);
for (LocatedBlock blk : lblocks2) {
assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
}
// verify blockSeekTo() still works (forced to use cached tokens)
in2.seekToNewSource(0);
assertTrue(checkFile1(in2));
// confirm tokens cached in in3 are still valid
lblocks3 = DFSTestUtil.getAllBlocks(in3);
for (LocatedBlock blk : lblocks3) {
assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
}
// verify fetchBlockByteRange() still works (forced to use cached tokens)
assertTrue(checkFile2(in3));
/*
* testing that when namenode is restarted, cached tokens should still
* work and there is no need to fetch new tokens from namenode. Like the
* previous test, this test should also run while namenode is down. The
* setup for this test depends on the previous test.
*/
// restart the namenode and then shut it down for test
cluster.restartNameNode(0);
cluster.shutdownNameNode(0);
// verify blockSeekTo() still works (forced to use cached tokens)
in1.seek(0);
assertTrue(checkFile1(in1));
// verify again blockSeekTo() still works (forced to use cached tokens)
in2.seekToNewSource(0);
assertTrue(checkFile1(in2));
// verify fetchBlockByteRange() still works (forced to use cached tokens)
assertTrue(checkFile2(in3));
/*
* testing that after both namenode and datanodes got restarted (namenode
* first, followed by datanodes), DFSClient can't access DN without
* re-fetching tokens and is able to re-fetch tokens transparently. The
* setup of this test depends on the previous test.
*/
// restore the cluster and restart the datanodes for test
cluster.restartNameNode(0);
assertTrue(cluster.restartDataNodes(true));
cluster.waitActive();
assertEquals(numDataNodes, cluster.getDataNodes().size());
// shutdown namenode so that DFSClient can't get new tokens from namenode
cluster.shutdownNameNode(0);
// verify blockSeekTo() fails (cached tokens become invalid)
in1.seek(0);
assertFalse(checkFile1(in1));
// verify fetchBlockByteRange() fails (cached tokens become invalid)
assertFalse(checkFile2(in3));
// restart the namenode to allow DFSClient to re-fetch tokens
cluster.restartNameNode(0);
// verify blockSeekTo() works again (by transparently re-fetching
// tokens from namenode)
in1.seek(0);
assertTrue(checkFile1(in1));
in2.seekToNewSource(0);
assertTrue(checkFile1(in2));
// verify fetchBlockByteRange() works again (by transparently
// re-fetching tokens from namenode)
assertTrue(checkFile2(in3));
/*
* testing that when datanodes are restarted on different ports, DFSClient
* is able to re-fetch tokens transparently to connect to them
*/
// restart datanodes on newly assigned ports
assertTrue(cluster.restartDataNodes(false));
cluster.waitActive();
assertEquals(numDataNodes, cluster.getDataNodes().size());
// verify blockSeekTo() is able to re-fetch token transparently
in1.seek(0);
assertTrue(checkFile1(in1));
// verify blockSeekTo() is able to re-fetch token transparently
in2.seekToNewSource(0);
assertTrue(checkFile1(in2));
// verify fetchBlockByteRange() is able to re-fetch token transparently
assertTrue(checkFile2(in3));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Integration testing of access token, involving NN, DN, and Balancer
*/
@Test
public void testEnd2End() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
new TestBalancer().integrationTest(conf);
}
}
| 22,297 | 36.350084 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.StatefulBlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.test.PathUtils;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
public class TestReplicationPolicy {
{
((Log4JLogger)BlockPlacementPolicy.LOG).getLogger().setLevel(Level.ALL);
}
private static final int BLOCK_SIZE = 1024;
private static final int NUM_OF_DATANODES = 6;
private static NetworkTopology cluster;
private static NameNode namenode;
private static BlockPlacementPolicy replicator;
private static final String filename = "/dummyfile.txt";
private static DatanodeDescriptor[] dataNodes;
private static DatanodeStorageInfo[] storages;
  // The interval for marking a datanode as stale.
private static final long staleInterval =
DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT;
@Rule
public ExpectedException exception = ExpectedException.none();
private static void updateHeartbeatWithUsage(DatanodeDescriptor dn,
long capacity, long dfsUsed, long remaining, long blockPoolUsed,
long dnCacheCapacity, long dnCacheUsed, int xceiverCount, int volFailures) {
dn.getStorageInfos()[0].setUtilizationForTesting(
capacity, dfsUsed, remaining, blockPoolUsed);
dn.updateHeartbeat(
BlockManagerTestUtil.getStorageReportsForDatanode(dn),
dnCacheCapacity, dnCacheUsed, xceiverCount, volFailures, null);
}
@BeforeClass
public static void setupCluster() throws Exception {
Configuration conf = new HdfsConfiguration();
final String[] racks = {
"/d1/r1",
"/d1/r1",
"/d1/r2",
"/d1/r2",
"/d2/r3",
"/d2/r3"};
storages = DFSTestUtil.createDatanodeStorageInfos(racks);
dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
new File(baseDir, "name").getPath());
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
DFSTestUtil.formatNameNode(conf);
namenode = new NameNode(conf);
final BlockManager bm = namenode.getNamesystem().getBlockManager();
replicator = bm.getBlockPlacementPolicy();
cluster = bm.getDatanodeManager().getNetworkTopology();
// construct network topology
for (int i=0; i < NUM_OF_DATANODES; i++) {
cluster.add(dataNodes[i]);
bm.getDatanodeManager().getHeartbeatManager().addDatanode(
dataNodes[i]);
}
for (int i=0; i < NUM_OF_DATANODES; i++) {
updateHeartbeatWithUsage(dataNodes[i],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
}
private static boolean isOnSameRack(DatanodeStorageInfo left, DatanodeStorageInfo right) {
return isOnSameRack(left, right.getDatanodeDescriptor());
}
private static boolean isOnSameRack(DatanodeStorageInfo left, DatanodeDescriptor right) {
return cluster.isOnSameRack(left.getDatanodeDescriptor(), right);
}
  /**
   * In this testcase, the client is dataNodes[0]. So the 1st replica should be
   * placed on dataNodes[0], the 2nd replica should be placed on a
   * different rack, and the 3rd should be placed on a different node
   * of the rack chosen for the 2nd replica.
   * The only exception is when <i>numOfReplicas</i> is 2:
   * the 1st is on dataNodes[0] and the 2nd is on a different rack.
   * (A simplified rack-level sketch of these constraints follows this test.)
   * @throws Exception
   */
@Test
public void testChooseTarget1() throws Exception {
updateHeartbeatWithUsage(dataNodes[0],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
0L, 0L, 4, 0); // overloaded
DatanodeStorageInfo[] targets;
targets = chooseTarget(0);
assertEquals(targets.length, 0);
targets = chooseTarget(1);
assertEquals(targets.length, 1);
assertEquals(storages[0], targets[0]);
targets = chooseTarget(2);
assertEquals(targets.length, 2);
assertEquals(storages[0], targets[0]);
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(3);
assertEquals(targets.length, 3);
assertEquals(storages[0], targets[0]);
assertFalse(isOnSameRack(targets[0], targets[1]));
assertTrue(isOnSameRack(targets[1], targets[2]));
targets = chooseTarget(4);
assertEquals(targets.length, 4);
assertEquals(storages[0], targets[0]);
assertTrue(isOnSameRack(targets[1], targets[2]) ||
isOnSameRack(targets[2], targets[3]));
assertFalse(isOnSameRack(targets[0], targets[2]));
updateHeartbeatWithUsage(dataNodes[0],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
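  // Illustrative sketch (a simplification, not BlockPlacementPolicyDefault
  // itself): the rack constraints asserted in testChooseTarget1 -- 1st replica
  // on the writer's rack, 2nd on a remote rack, 3rd on the 2nd replica's
  // rack -- can be summarised as a check over rack names:
  private static boolean satisfiesDefaultPlacement(String writerRack,
      String rack1, String rack2, String rack3) {
    return rack1.equals(writerRack)   // 1st replica: writer-local rack
        && !rack2.equals(rack1)       // 2nd replica: a different rack
        && rack3.equals(rack2);       // 3rd replica: same rack as the 2nd
  }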
private static DatanodeStorageInfo[] chooseTarget(int numOfReplicas) {
return chooseTarget(numOfReplicas, dataNodes[0]);
}
private static DatanodeStorageInfo[] chooseTarget(int numOfReplicas,
DatanodeDescriptor writer) {
return chooseTarget(numOfReplicas, writer,
new ArrayList<DatanodeStorageInfo>());
}
private static DatanodeStorageInfo[] chooseTarget(int numOfReplicas,
List<DatanodeStorageInfo> chosenNodes) {
return chooseTarget(numOfReplicas, dataNodes[0], chosenNodes);
}
private static DatanodeStorageInfo[] chooseTarget(int numOfReplicas,
DatanodeDescriptor writer, List<DatanodeStorageInfo> chosenNodes) {
return chooseTarget(numOfReplicas, writer, chosenNodes, null);
}
private static DatanodeStorageInfo[] chooseTarget(int numOfReplicas,
List<DatanodeStorageInfo> chosenNodes, Set<Node> excludedNodes) {
return chooseTarget(numOfReplicas, dataNodes[0], chosenNodes, excludedNodes);
}
private static DatanodeStorageInfo[] chooseTarget(
int numOfReplicas,
DatanodeDescriptor writer,
List<DatanodeStorageInfo> chosenNodes,
Set<Node> excludedNodes) {
return replicator.chooseTarget(filename, numOfReplicas, writer, chosenNodes,
false, excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
}
  /**
   * In this testcase, the client is dataNodes[0], but dataNodes[1] is
   * not allowed to be chosen. So the 1st replica should be
   * placed on dataNodes[0], the 2nd replica should be placed on a different
   * rack, the 3rd should be on the same rack as the 2nd replica, and the rest
   * should be placed on a third rack.
   * @throws Exception
   */
@Test
public void testChooseTarget2() throws Exception {
Set<Node> excludedNodes;
DatanodeStorageInfo[] targets;
List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
excludedNodes = new HashSet<Node>();
excludedNodes.add(dataNodes[1]);
targets = chooseTarget(0, chosenNodes, excludedNodes);
assertEquals(targets.length, 0);
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
targets = chooseTarget(1, chosenNodes, excludedNodes);
assertEquals(targets.length, 1);
assertEquals(storages[0], targets[0]);
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
targets = chooseTarget(2, chosenNodes, excludedNodes);
assertEquals(targets.length, 2);
assertEquals(storages[0], targets[0]);
assertFalse(isOnSameRack(targets[0], targets[1]));
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
targets = chooseTarget(3, chosenNodes, excludedNodes);
assertEquals(targets.length, 3);
assertEquals(storages[0], targets[0]);
assertFalse(isOnSameRack(targets[0], targets[1]));
assertTrue(isOnSameRack(targets[1], targets[2]));
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
targets = chooseTarget(4, chosenNodes, excludedNodes);
assertEquals(targets.length, 4);
assertEquals(storages[0], targets[0]);
for(int i=1; i<4; i++) {
assertFalse(isOnSameRack(targets[0], targets[i]));
}
assertTrue(isOnSameRack(targets[1], targets[2]) ||
isOnSameRack(targets[2], targets[3]));
assertFalse(isOnSameRack(targets[1], targets[3]));
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
chosenNodes.add(storages[2]);
targets = replicator.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true,
excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
System.out.println("targets=" + Arrays.asList(targets));
assertEquals(2, targets.length);
//make sure that the chosen node is in the target.
int i = 0;
for (; i < targets.length && !storages[2].equals(targets[i]); i++);
assertTrue(i < targets.length);
}
/**
* In this testcase, client is dataNodes[0], but dataNodes[0] is not qualified
* to be chosen. So the 1st replica should be placed on dataNodes[1],
* the 2nd replica should be placed on a different rack,
* the 3rd replica should be placed on the same rack as the 2nd replica,
* and the rest should be placed on the third rack.
* @throws Exception
*/
@Test
public void testChooseTarget3() throws Exception {
// make data node 0 to be not qualified to choose
updateHeartbeatWithUsage(dataNodes[0],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
(HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
0L, 0L, 0, 0); // no space
DatanodeStorageInfo[] targets;
targets = chooseTarget(0);
assertEquals(targets.length, 0);
targets = chooseTarget(1);
assertEquals(targets.length, 1);
assertEquals(storages[1], targets[0]);
targets = chooseTarget(2);
assertEquals(targets.length, 2);
assertEquals(storages[1], targets[0]);
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(3);
assertEquals(targets.length, 3);
assertEquals(storages[1], targets[0]);
assertTrue(isOnSameRack(targets[1], targets[2]));
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(4);
assertEquals(targets.length, 4);
assertEquals(storages[1], targets[0]);
for(int i=1; i<4; i++) {
assertFalse(isOnSameRack(targets[0], targets[i]));
}
assertTrue(isOnSameRack(targets[1], targets[2]) ||
isOnSameRack(targets[2], targets[3]));
assertFalse(isOnSameRack(targets[1], targets[3]));
updateHeartbeatWithUsage(dataNodes[0],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
  /**
   * In this testcase, the client is dataNodes[0], but none of the nodes on
   * rack 1 is qualified to be chosen. So the 1st replica should be placed on
   * either rack 2 or rack 3,
   * the 2nd replica should be placed on a different rack,
   * and the 3rd replica should be placed on the same rack as the 1st replica.
   * @throws Exception
   */
@Test
public void testChoooseTarget4() throws Exception {
// make data node 0 & 1 to be not qualified to choose: not enough disk space
for(int i=0; i<2; i++) {
updateHeartbeatWithUsage(dataNodes[i],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
(HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
DatanodeStorageInfo[] targets;
targets = chooseTarget(0);
assertEquals(targets.length, 0);
targets = chooseTarget(1);
assertEquals(targets.length, 1);
assertFalse(isOnSameRack(targets[0], dataNodes[0]));
targets = chooseTarget(2);
assertEquals(targets.length, 2);
assertFalse(isOnSameRack(targets[0], dataNodes[0]));
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(3);
assertEquals(targets.length, 3);
for(int i=0; i<3; i++) {
assertFalse(isOnSameRack(targets[i], dataNodes[0]));
}
assertTrue(isOnSameRack(targets[0], targets[1]) ||
isOnSameRack(targets[1], targets[2]));
assertFalse(isOnSameRack(targets[0], targets[2]));
for(int i=0; i<2; i++) {
updateHeartbeatWithUsage(dataNodes[i],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
}
  /**
   * In this testcase, the client is a node outside of the file system.
   * So the 1st replica can be placed on any node,
   * the 2nd replica should be placed on a different rack,
   * and the 3rd replica should be placed on the same rack as the 2nd replica.
   * @throws Exception
   */
@Test
public void testChooseTarget5() throws Exception {
DatanodeDescriptor writerDesc =
DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r4");
DatanodeStorageInfo[] targets;
targets = chooseTarget(0, writerDesc);
assertEquals(targets.length, 0);
targets = chooseTarget(1, writerDesc);
assertEquals(targets.length, 1);
targets = chooseTarget(2, writerDesc);
assertEquals(targets.length, 2);
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(3, writerDesc);
assertEquals(targets.length, 3);
assertTrue(isOnSameRack(targets[1], targets[2]));
assertFalse(isOnSameRack(targets[0], targets[1]));
}
  /**
   * In this testcase, there is a sufficient total number of nodes, but only
   * one rack is actually available.
   * @throws Exception
   */
@Test
public void testChooseTarget6() throws Exception {
DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo(
"DS-xxxx", "7.7.7.7", "/d2/r3", "host7");
DatanodeDescriptor newDn = storage.getDatanodeDescriptor();
Set<Node> excludedNodes;
List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
excludedNodes = new HashSet<Node>();
excludedNodes.add(dataNodes[0]);
excludedNodes.add(dataNodes[1]);
excludedNodes.add(dataNodes[2]);
excludedNodes.add(dataNodes[3]);
DatanodeStorageInfo[] targets;
// Only two nodes available in a rack. Try picking two nodes. Only one
// should return.
targets = chooseTarget(2, chosenNodes, excludedNodes);
assertEquals(1, targets.length);
// Make three nodes available in a rack.
final BlockManager bm = namenode.getNamesystem().getBlockManager();
bm.getDatanodeManager().getNetworkTopology().add(newDn);
bm.getDatanodeManager().getHeartbeatManager().addDatanode(newDn);
updateHeartbeatWithUsage(newDn,
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
// Try picking three nodes. Only two should return.
excludedNodes.clear();
excludedNodes.add(dataNodes[0]);
excludedNodes.add(dataNodes[1]);
excludedNodes.add(dataNodes[2]);
excludedNodes.add(dataNodes[3]);
chosenNodes.clear();
try {
targets = chooseTarget(3, chosenNodes, excludedNodes);
assertEquals(2, targets.length);
} finally {
bm.getDatanodeManager().getNetworkTopology().remove(newDn);
}
}
  /**
   * In this testcase, it tries to choose more targets than there are available
   * nodes and checks the result, with stale node avoidance on the write path enabled.
   * @throws Exception
   */
@Test
public void testChooseTargetWithMoreThanAvailableNodesWithStaleness()
throws Exception {
try {
namenode.getNamesystem().getBlockManager().getDatanodeManager()
.setNumStaleNodes(NUM_OF_DATANODES);
testChooseTargetWithMoreThanAvailableNodes();
} finally {
namenode.getNamesystem().getBlockManager().getDatanodeManager()
.setNumStaleNodes(0);
}
}
  /**
   * In this testcase, it tries to choose more targets than there are available
   * nodes and checks the result.
   * @throws Exception
   */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
// make data node 0 & 1 to be not qualified to choose: not enough disk space
for(int i=0; i<2; i++) {
updateHeartbeatWithUsage(dataNodes[i],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
(HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
final LogVerificationAppender appender = new LogVerificationAppender();
final Logger logger = Logger.getRootLogger();
logger.addAppender(appender);
// try to choose NUM_OF_DATANODES which is more than actually available
// nodes.
DatanodeStorageInfo[] targets = chooseTarget(NUM_OF_DATANODES);
assertEquals(targets.length, NUM_OF_DATANODES - 2);
final List<LoggingEvent> log = appender.getLog();
assertNotNull(log);
assertFalse(log.size() == 0);
final LoggingEvent lastLogEntry = log.get(log.size() - 1);
assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
    // Supposed to place replicas on each node, but two data nodes are not
    // available for placing replicas, so here we expect a shortfall of 2
assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));
for(int i=0; i<2; i++) {
updateHeartbeatWithUsage(dataNodes[i],
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
}
private boolean containsWithinRange(DatanodeStorageInfo target,
DatanodeDescriptor[] nodes, int startIndex, int endIndex) {
assert startIndex >= 0 && startIndex < nodes.length;
assert endIndex >= startIndex && endIndex < nodes.length;
for (int i = startIndex; i <= endIndex; i++) {
if (nodes[i].equals(target.getDatanodeDescriptor())) {
return true;
}
}
return false;
}
private boolean containsWithinRange(DatanodeDescriptor target,
DatanodeStorageInfo[] nodes, int startIndex, int endIndex) {
assert startIndex >= 0 && startIndex < nodes.length;
assert endIndex >= startIndex && endIndex < nodes.length;
for (int i = startIndex; i <= endIndex; i++) {
if (nodes[i].getDatanodeDescriptor().equals(target)) {
return true;
}
}
return false;
}
@Test
public void testChooseTargetWithStaleNodes() throws Exception {
// Set dataNodes[0] as stale
DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[0], -(staleInterval + 1));
namenode.getNamesystem().getBlockManager()
.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
assertTrue(namenode.getNamesystem().getBlockManager()
.getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
DatanodeStorageInfo[] targets;
    // We set dataNodes[0] as stale, thus the policy should choose dataNodes[1]
    // since dataNodes[1] is on the same rack as dataNodes[0] (the writer)
targets = chooseTarget(1);
assertEquals(targets.length, 1);
assertEquals(storages[1], targets[0]);
Set<Node> excludedNodes = new HashSet<Node>();
excludedNodes.add(dataNodes[1]);
List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
targets = chooseTarget(1, chosenNodes, excludedNodes);
assertEquals(targets.length, 1);
assertFalse(isOnSameRack(targets[0], dataNodes[0]));
// reset
DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[0], 0);
namenode.getNamesystem().getBlockManager()
.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
}
  /**
   * In this testcase, we set 3 nodes (dataNodes[0] ~ dataNodes[2]) as stale,
   * and when the number of replicas is less than or equal to 3, only the
   * healthy datanodes should be returned by the chooseTarget method. When the
   * number of replicas is 4, a stale node has to be included.
   * (A sketch of the staleness check follows this test.)
   * @throws Exception
   */
@Test
public void testChooseTargetWithHalfStaleNodes() throws Exception {
// Set dataNodes[0], dataNodes[1], and dataNodes[2] as stale
for (int i = 0; i < 3; i++) {
DFSTestUtil
.resetLastUpdatesWithOffset(dataNodes[i], -(staleInterval + 1));
}
namenode.getNamesystem().getBlockManager()
.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
DatanodeStorageInfo[] targets = chooseTarget(0);
assertEquals(targets.length, 0);
// Since we have 6 datanodes total, stale nodes should
// not be returned until we ask for more than 3 targets
targets = chooseTarget(1);
assertEquals(targets.length, 1);
assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));
targets = chooseTarget(2);
assertEquals(targets.length, 2);
assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));
assertFalse(containsWithinRange(targets[1], dataNodes, 0, 2));
targets = chooseTarget(3);
assertEquals(targets.length, 3);
assertTrue(containsWithinRange(targets[0], dataNodes, 3, 5));
assertTrue(containsWithinRange(targets[1], dataNodes, 3, 5));
assertTrue(containsWithinRange(targets[2], dataNodes, 3, 5));
targets = chooseTarget(4);
assertEquals(targets.length, 4);
assertTrue(containsWithinRange(dataNodes[3], targets, 0, 3));
assertTrue(containsWithinRange(dataNodes[4], targets, 0, 3));
assertTrue(containsWithinRange(dataNodes[5], targets, 0, 3));
for (int i = 0; i < dataNodes.length; i++) {
DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[i], 0);
}
namenode.getNamesystem().getBlockManager()
.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
}
@Test
public void testChooseTargetWithMoreThanHalfStaleNodes() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
String[] hosts = new String[]{"host1", "host2", "host3",
"host4", "host5", "host6"};
String[] racks = new String[]{"/d1/r1", "/d1/r1", "/d1/r2",
"/d1/r2", "/d2/r3", "/d2/r3"};
MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(conf).racks(racks)
.hosts(hosts).numDataNodes(hosts.length).build();
miniCluster.waitActive();
try {
      // Step 1. Mark two datanodes as stale and check whether the
// avoidStaleDataNodesForWrite calculation is correct.
// First stop the heartbeat of host1 and host2
for (int i = 0; i < 2; i++) {
DataNode dn = miniCluster.getDataNodes().get(i);
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
DatanodeDescriptor dnDes = miniCluster.getNameNode().getNamesystem()
.getBlockManager().getDatanodeManager()
.getDatanode(dn.getDatanodeId());
DFSTestUtil.resetLastUpdatesWithOffset(dnDes, -(staleInterval + 1));
}
// Instead of waiting, explicitly call heartbeatCheck to
      // let the heartbeat manager detect the stale nodes
miniCluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
int numStaleNodes = miniCluster.getNameNode().getNamesystem()
.getBlockManager().getDatanodeManager().getNumStaleNodes();
assertEquals(numStaleNodes, 2);
assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
// Call chooseTarget
DatanodeDescriptor staleNodeInfo = miniCluster.getNameNode()
.getNamesystem().getBlockManager().getDatanodeManager()
.getDatanode(miniCluster.getDataNodes().get(0).getDatanodeId());
BlockPlacementPolicy replicator = miniCluster.getNameNode()
.getNamesystem().getBlockManager().getBlockPlacementPolicy();
DatanodeStorageInfo[] targets = replicator.chooseTarget(filename, 3,
staleNodeInfo, new ArrayList<DatanodeStorageInfo>(), false, null,
BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
assertEquals(targets.length, 3);
assertFalse(isOnSameRack(targets[0], staleNodeInfo));
// Step 2. Set more than half of the datanodes as stale
for (int i = 0; i < 4; i++) {
DataNode dn = miniCluster.getDataNodes().get(i);
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
DatanodeDescriptor dnDesc = miniCluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().getDatanode(dn.getDatanodeId());
DFSTestUtil.resetLastUpdatesWithOffset(dnDesc, -(staleInterval + 1));
}
// Explicitly call heartbeatCheck
miniCluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
numStaleNodes = miniCluster.getNameNode().getNamesystem()
.getBlockManager().getDatanodeManager().getNumStaleNodes();
assertEquals(numStaleNodes, 4);
// According to our strategy, stale datanodes will be included for writing
// to avoid hotspots
assertFalse(miniCluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
// Call chooseTarget
targets = replicator.chooseTarget(filename, 3, staleNodeInfo,
new ArrayList<DatanodeStorageInfo>(), false, null, BLOCK_SIZE,
TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
assertEquals(targets.length, 3);
assertTrue(isOnSameRack(targets[0], staleNodeInfo));
// Step 3. Set 2 stale datanodes back to healthy nodes,
// still have 2 stale nodes
for (int i = 2; i < 4; i++) {
DataNode dn = miniCluster.getDataNodes().get(i);
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
DatanodeDescriptor dnDesc = miniCluster.getNameNode().getNamesystem()
.getBlockManager().getDatanodeManager()
.getDatanode(dn.getDatanodeId());
DFSTestUtil.resetLastUpdatesWithOffset(dnDesc, 0);
}
// Explicitly call heartbeatCheck
miniCluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
numStaleNodes = miniCluster.getNameNode().getNamesystem()
.getBlockManager().getDatanodeManager().getNumStaleNodes();
assertEquals(numStaleNodes, 2);
assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
// Call chooseTarget
targets = chooseTarget(3, staleNodeInfo);
assertEquals(targets.length, 3);
assertFalse(isOnSameRack(targets[0], staleNodeInfo));
} finally {
miniCluster.shutdown();
}
}
/**
* This testcase tests re-replication, when dataNodes[0] is already chosen.
   * So the 1st replica can be placed on a random rack.
   * The 2nd replica should be placed on a different node on the same rack as
   * the 1st replica. The 3rd replica can be placed randomly.
* @throws Exception
*/
@Test
public void testRereplicate1() throws Exception {
List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
chosenNodes.add(storages[0]);
DatanodeStorageInfo[] targets;
targets = chooseTarget(0, chosenNodes);
assertEquals(targets.length, 0);
targets = chooseTarget(1, chosenNodes);
assertEquals(targets.length, 1);
assertFalse(isOnSameRack(targets[0], dataNodes[0]));
targets = chooseTarget(2, chosenNodes);
assertEquals(targets.length, 2);
assertTrue(isOnSameRack(targets[0], dataNodes[0]));
assertFalse(isOnSameRack(targets[0], targets[1]));
targets = chooseTarget(3, chosenNodes);
assertEquals(targets.length, 3);
assertTrue(isOnSameRack(targets[0], dataNodes[0]));
assertFalse(isOnSameRack(targets[0], targets[2]));
}
/**
* This testcase tests re-replication,
* when dataNodes[0] and dataNodes[1] are already chosen.
* So the 1st replica should be placed on a different rack than rack 1.
   * The remaining replicas can be placed randomly.
* @throws Exception
*/
@Test
public void testRereplicate2() throws Exception {
List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
chosenNodes.add(storages[0]);
chosenNodes.add(storages[1]);
DatanodeStorageInfo[] targets;
targets = chooseTarget(0, chosenNodes);
assertEquals(targets.length, 0);
targets = chooseTarget(1, chosenNodes);
assertEquals(targets.length, 1);
assertFalse(isOnSameRack(targets[0], dataNodes[0]));
targets = chooseTarget(2, chosenNodes);
assertEquals(targets.length, 2);
assertFalse(isOnSameRack(targets[0], dataNodes[0]));
assertFalse(isOnSameRack(targets[1], dataNodes[0]));
}
/**
* This testcase tests re-replication,
* when dataNodes[0] and dataNodes[2] are already chosen.
   * So the 1st replica should be placed on the rack where the writer resides.
   * The remaining replicas can be placed randomly.
* @throws Exception
*/
@Test
public void testRereplicate3() throws Exception {
List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
chosenNodes.add(storages[0]);
chosenNodes.add(storages[2]);
DatanodeStorageInfo[] targets;
targets = chooseTarget(0, chosenNodes);
assertEquals(targets.length, 0);
targets = chooseTarget(1, chosenNodes);
assertEquals(targets.length, 1);
assertTrue(isOnSameRack(targets[0], dataNodes[0]));
assertFalse(isOnSameRack(targets[0], dataNodes[2]));
targets = chooseTarget(1, dataNodes[2], chosenNodes);
assertEquals(targets.length, 1);
assertTrue(isOnSameRack(targets[0], dataNodes[2]));
assertFalse(isOnSameRack(targets[0], dataNodes[0]));
targets = chooseTarget(2, chosenNodes);
assertEquals(targets.length, 2);
assertTrue(isOnSameRack(targets[0], dataNodes[0]));
targets = chooseTarget(2, dataNodes[2], chosenNodes);
assertEquals(targets.length, 2);
assertTrue(isOnSameRack(targets[0], dataNodes[2]));
}
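  /** Wraps the given block id in a {@link BlockInfoContiguous} with a replication factor of 3. */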
private BlockInfo genBlockInfo(long id) {
return new BlockInfoContiguous(new Block(id), (short) 3);
}
/**
   * Test that high priority blocks are processed before the low priority
* blocks.
*/
@Test(timeout = 60000)
public void testReplicationWithPriority() throws Exception {
int DFS_NAMENODE_REPLICATION_INTERVAL = 1000;
int HIGH_PRIORITY = 0;
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
.format(true).build();
try {
cluster.waitActive();
final UnderReplicatedBlocks neededReplications = cluster.getNameNode()
.getNamesystem().getBlockManager().neededReplications;
for (int i = 0; i < 100; i++) {
// Adding the blocks directly to normal priority
neededReplications.add(genBlockInfo(ThreadLocalRandom.current().
nextLong()), 2, 0, 3);
}
      // Let's wait for the replication interval so that processing of normal
      // priority blocks starts
Thread.sleep(DFS_NAMENODE_REPLICATION_INTERVAL);
// Adding the block directly to high priority list
neededReplications.add(genBlockInfo(ThreadLocalRandom.current().
nextLong()), 1, 0, 3);
      // Let's wait for the replication interval
Thread.sleep(DFS_NAMENODE_REPLICATION_INTERVAL);
      // Check that replication completed successfully. There is no need to wait
      // until all 100 normal blocks have been processed.
assertFalse("Not able to clear the element from high priority list",
neededReplications.iterator(HIGH_PRIORITY).hasNext());
} finally {
cluster.shutdown();
}
}
/**
   * Test that chooseUnderReplicatedBlocks processes blocks based on priority
*/
@Test
public void testChooseUnderReplicatedBlocks() throws Exception {
UnderReplicatedBlocks underReplicatedBlocks = new UnderReplicatedBlocks();
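    // For reference, the priority queues asserted below are, in order:
    // QUEUE_HIGHEST_PRIORITY, QUEUE_VERY_UNDER_REPLICATED,
    // QUEUE_UNDER_REPLICATED, QUEUE_REPLICAS_BADLY_DISTRIBUTED and
    // QUEUE_WITH_CORRUPT_BLOCKS.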
for (int i = 0; i < 5; i++) {
// Adding QUEUE_HIGHEST_PRIORITY block
underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().
nextLong()), 1, 0, 3);
// Adding QUEUE_VERY_UNDER_REPLICATED block
underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().
nextLong()), 2, 0, 7);
// Adding QUEUE_REPLICAS_BADLY_DISTRIBUTED block
underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().
nextLong()), 6, 0, 6);
// Adding QUEUE_UNDER_REPLICATED block
underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().
nextLong()), 5, 0, 6);
// Adding QUEUE_WITH_CORRUPT_BLOCKS block
underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().
nextLong()), 0, 0, 3);
}
    // Choose 6 blocks from UnderReplicatedBlocks. Then it should pick 5 blocks
    // from QUEUE_HIGHEST_PRIORITY and 1 block from QUEUE_VERY_UNDER_REPLICATED.
List<List<BlockInfo>> chosenBlocks =
underReplicatedBlocks.chooseUnderReplicatedBlocks(6);
assertTheChosenBlocks(chosenBlocks, 5, 1, 0, 0, 0);
// Choose 10 blocks from UnderReplicatedBlocks. Then it should pick 4 blocks from
// QUEUE_VERY_UNDER_REPLICATED, 5 blocks from QUEUE_UNDER_REPLICATED and 1
// block from QUEUE_REPLICAS_BADLY_DISTRIBUTED.
chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(10);
assertTheChosenBlocks(chosenBlocks, 0, 4, 5, 1, 0);
// Adding QUEUE_HIGHEST_PRIORITY
underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().
nextLong()), 1, 0, 3);
// Choose 10 blocks from UnderReplicatedBlocks. Then it should pick 1 block from
// QUEUE_HIGHEST_PRIORITY, 4 blocks from QUEUE_REPLICAS_BADLY_DISTRIBUTED
// and 5 blocks from QUEUE_WITH_CORRUPT_BLOCKS.
chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(10);
assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 4, 5);
    // Since the end of all lists has been reached, it should start picking
    // blocks from the beginning again.
    // Choose 7 blocks from UnderReplicatedBlocks. Then it should pick 6 blocks
    // from QUEUE_HIGHEST_PRIORITY and 1 block from QUEUE_VERY_UNDER_REPLICATED.
chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(7);
assertTheChosenBlocks(chosenBlocks, 6, 1, 0, 0, 0);
}
  /** Asserts that the chosen blocks match the expected count for each priority queue. */
private void assertTheChosenBlocks(
List<List<BlockInfo>> chosenBlocks, int firstPrioritySize,
int secondPrioritySize, int thirdPrioritySize, int fourthPrioritySize,
int fifthPrioritySize) {
assertEquals(
"Not returned the expected number of QUEUE_HIGHEST_PRIORITY blocks",
firstPrioritySize, chosenBlocks.get(
UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY).size());
assertEquals(
"Not returned the expected number of QUEUE_VERY_UNDER_REPLICATED blocks",
secondPrioritySize, chosenBlocks.get(
UnderReplicatedBlocks.QUEUE_VERY_UNDER_REPLICATED).size());
assertEquals(
"Not returned the expected number of QUEUE_UNDER_REPLICATED blocks",
thirdPrioritySize, chosenBlocks.get(
UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED).size());
assertEquals(
"Not returned the expected number of QUEUE_REPLICAS_BADLY_DISTRIBUTED blocks",
fourthPrioritySize, chosenBlocks.get(
UnderReplicatedBlocks.QUEUE_REPLICAS_BADLY_DISTRIBUTED).size());
assertEquals(
"Not returned the expected number of QUEUE_WITH_CORRUPT_BLOCKS blocks",
fifthPrioritySize, chosenBlocks.get(
UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS).size());
}
/**
   * Test that chooseReplicaToDelete selects replicas to remove based on
   * block locality and free space
*/
@Test
public void testChooseReplicaToDelete() throws Exception {
List<DatanodeStorageInfo> replicaList = new ArrayList<DatanodeStorageInfo>();
final Map<String, List<DatanodeStorageInfo>> rackMap
= new HashMap<String, List<DatanodeStorageInfo>>();
dataNodes[0].setRemaining(4*1024*1024);
replicaList.add(storages[0]);
dataNodes[1].setRemaining(3*1024*1024);
replicaList.add(storages[1]);
dataNodes[2].setRemaining(2*1024*1024);
replicaList.add(storages[2]);
dataNodes[5].setRemaining(1*1024*1024);
replicaList.add(storages[5]);
// Refresh the last update time for all the datanodes
for (int i = 0; i < dataNodes.length; i++) {
DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[i], 0);
}
List<DatanodeStorageInfo> first = new ArrayList<DatanodeStorageInfo>();
List<DatanodeStorageInfo> second = new ArrayList<DatanodeStorageInfo>();
replicator.splitNodesWithRack(replicaList, rackMap, first, second);
    // storages[0] and storages[1] are in the first set as their rack has two
    // replica nodes, while storages[2] and storages[5] are in the second set.
assertEquals(2, first.size());
assertEquals(2, second.size());
List<StorageType> excessTypes = new ArrayList<StorageType>();
{
// test returning null
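      // Only SSD is listed as an excess storage type, so none of the replicas
      // (which presumably live on DEFAULT storages) qualify for deletion.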
excessTypes.add(StorageType.SSD);
assertNull(replicator.chooseReplicaToDelete(
null, null, (short)3, first, second, excessTypes));
}
excessTypes.add(StorageType.DEFAULT);
DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete(
null, null, (short)3, first, second, excessTypes);
    // Within the first set, storages[1] has the least free space
assertEquals(chosen, storages[1]);
replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
assertEquals(0, first.size());
assertEquals(3, second.size());
    // Within the second set, storages[5] has the least free space
excessTypes.add(StorageType.DEFAULT);
chosen = replicator.chooseReplicaToDelete(
null, null, (short)2, first, second, excessTypes);
assertEquals(chosen, storages[5]);
}
/**
* This testcase tests whether the default value returned by
* DFSUtil.getInvalidateWorkPctPerIteration() is positive,
* and whether an IllegalArgumentException will be thrown
* when 0.0f is retrieved
*/
@Test
public void testGetInvalidateWorkPctPerIteration() {
Configuration conf = new Configuration();
float blocksInvalidateWorkPct = DFSUtil
.getInvalidateWorkPctPerIteration(conf);
assertTrue(blocksInvalidateWorkPct > 0);
conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
"0.5f");
blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
assertEquals(blocksInvalidateWorkPct, 0.5f, blocksInvalidateWorkPct * 1e-7);
conf.set(DFSConfigKeys.
DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "1.0f");
blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
assertEquals(blocksInvalidateWorkPct, 1.0f, blocksInvalidateWorkPct * 1e-7);
conf.set(DFSConfigKeys.
DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "0.0f");
exception.expect(IllegalArgumentException.class);
blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
}
/**
* This testcase tests whether an IllegalArgumentException
* will be thrown when a negative value is retrieved by
* DFSUtil#getInvalidateWorkPctPerIteration
*/
@Test
public void testGetInvalidateWorkPctPerIteration_NegativeValue() {
Configuration conf = new Configuration();
float blocksInvalidateWorkPct = DFSUtil
.getInvalidateWorkPctPerIteration(conf);
assertTrue(blocksInvalidateWorkPct > 0);
conf.set(DFSConfigKeys.
DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "-0.5f");
exception.expect(IllegalArgumentException.class);
blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
}
/**
* This testcase tests whether an IllegalArgumentException
* will be thrown when a value greater than 1 is retrieved by
* DFSUtil#getInvalidateWorkPctPerIteration
*/
@Test
public void testGetInvalidateWorkPctPerIteration_GreaterThanOne() {
Configuration conf = new Configuration();
float blocksInvalidateWorkPct = DFSUtil
.getInvalidateWorkPctPerIteration(conf);
assertTrue(blocksInvalidateWorkPct > 0);
conf.set(DFSConfigKeys.
DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "1.5f");
exception.expect(IllegalArgumentException.class);
blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
}
/**
* This testcase tests whether the value returned by
* DFSUtil.getReplWorkMultiplier() is positive,
* and whether an IllegalArgumentException will be thrown
* when a non-positive value is retrieved
*/
@Test
public void testGetReplWorkMultiplier() {
Configuration conf = new Configuration();
int blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
assertTrue(blocksReplWorkMultiplier > 0);
conf.set(DFSConfigKeys.
DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,"3");
blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
assertEquals(blocksReplWorkMultiplier, 3);
conf.set(DFSConfigKeys.
DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,"-1");
exception.expect(IllegalArgumentException.class);
blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
}
@Test(timeout = 60000)
public void testUpdateDoesNotCauseSkippedReplication() {
UnderReplicatedBlocks underReplicatedBlocks = new UnderReplicatedBlocks();
BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong());
BlockInfo block2 = genBlockInfo(ThreadLocalRandom.current().nextLong());
BlockInfo block3 = genBlockInfo(ThreadLocalRandom.current().nextLong());
// Adding QUEUE_VERY_UNDER_REPLICATED block
final int block1CurReplicas = 2;
final int block1ExpectedReplicas = 7;
underReplicatedBlocks.add(block1, block1CurReplicas, 0,
block1ExpectedReplicas);
// Adding QUEUE_VERY_UNDER_REPLICATED block
underReplicatedBlocks.add(block2, 2, 0, 7);
// Adding QUEUE_UNDER_REPLICATED block
underReplicatedBlocks.add(block3, 2, 0, 6);
List<List<BlockInfo>> chosenBlocks;
// Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
// from QUEUE_VERY_UNDER_REPLICATED.
chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
assertTheChosenBlocks(chosenBlocks, 0, 1, 0, 0, 0);
    // Increasing the replica count will move the block down a
    // priority level. This simulates a replica being completed in between checks.
underReplicatedBlocks.update(block1, block1CurReplicas+1, 0,
block1ExpectedReplicas, 1, 0);
    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
    // from QUEUE_VERY_UNDER_REPLICATED.
    // block1 was moved down a priority, so block2 remains here and should not
    // be skipped over.
chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
assertTheChosenBlocks(chosenBlocks, 0, 1, 0, 0, 0);
// Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
// from QUEUE_UNDER_REPLICATED.
chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
assertTheChosenBlocks(chosenBlocks, 0, 0, 1, 0, 0);
}
@Test(timeout = 60000)
public void testAddStoredBlockDoesNotCauseSkippedReplication()
throws IOException {
Namesystem mockNS = mock(Namesystem.class);
when(mockNS.isPopulatingReplQueues()).thenReturn(true);
when(mockNS.hasWriteLock()).thenReturn(true);
BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong());
BlockInfo block2 = genBlockInfo(ThreadLocalRandom.current().nextLong());
    // Adding QUEUE_HIGHEST_PRIORITY block
underReplicatedBlocks.add(block1, 0, 1, 1);
    // Adding QUEUE_HIGHEST_PRIORITY block
underReplicatedBlocks.add(block2, 0, 1, 1);
List<List<BlockInfo>> chosenBlocks;
// Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
    // from QUEUE_HIGHEST_PRIORITY.
chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
// Adding this block collection to the BlockManager, so that when we add the
// block under construction, the BlockManager will realize the expected
// replication has been achieved and remove it from the under-replicated
// queue.
BlockInfoContiguousUnderConstruction info = new BlockInfoContiguousUnderConstruction(block1, (short) 1);
BlockCollection bc = mock(BlockCollection.class);
when(bc.getPreferredBlockReplication()).thenReturn((short)1);
bm.addBlockCollection(info, bc);
// Adding this block will increase its current replication, and that will
// remove it from the queue.
bm.addStoredBlockUnderConstruction(new StatefulBlockInfo(info, info,
ReplicaState.FINALIZED), TestReplicationPolicy.storages[0]);
// Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
    // from QUEUE_HIGHEST_PRIORITY.
// This block remains and should not be skipped over.
chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
}
@Test(timeout = 60000)
public void
testConvertLastBlockToUnderConstructionDoesNotCauseSkippedReplication()
throws IOException {
Namesystem mockNS = mock(Namesystem.class);
when(mockNS.isPopulatingReplQueues()).thenReturn(true);
BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong());
BlockInfo block2 = genBlockInfo(ThreadLocalRandom.current().nextLong());
    // Adding QUEUE_HIGHEST_PRIORITY block
underReplicatedBlocks.add(block1, 0, 1, 1);
    // Adding QUEUE_HIGHEST_PRIORITY block
underReplicatedBlocks.add(block2, 0, 1, 1);
List<List<BlockInfo>> chosenBlocks;
// Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
    // from QUEUE_HIGHEST_PRIORITY.
chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
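    // The mocks below stand in for a BlockCollection whose last block can be
    // converted to under construction, so convertLastBlockToUnderConstruction()
    // can run against the BlockManager without a full namesystem behind it.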
final BlockInfo info = new BlockInfoContiguous(block1, (short) 1);
final BlockCollection mbc = mock(BlockCollection.class);
when(mbc.getLastBlock()).thenReturn(info);
when(mbc.getPreferredBlockSize()).thenReturn(block1.getNumBytes() + 1);
when(mbc.getPreferredBlockReplication()).thenReturn((short)1);
when(mbc.isUnderConstruction()).thenReturn(true);
ContentSummary cs = mock(ContentSummary.class);
when(cs.getLength()).thenReturn((long)1);
when(mbc.computeContentSummary(bm.getStoragePolicySuite())).thenReturn(cs);
info.setBlockCollection(mbc);
bm.addBlockCollection(info, mbc);
DatanodeStorageInfo[] storageAry = {new DatanodeStorageInfo(
dataNodes[0], new DatanodeStorage("s1"))};
final BlockInfoContiguousUnderConstruction ucBlock =
info.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION,
storageAry);
DatanodeStorageInfo storage = mock(DatanodeStorageInfo.class);
DatanodeDescriptor dn = mock(DatanodeDescriptor.class);
when(dn.isDecommissioned()).thenReturn(true);
when(storage.getState()).thenReturn(DatanodeStorage.State.NORMAL);
when(storage.getDatanodeDescriptor()).thenReturn(dn);
when(storage.removeBlock(any(BlockInfo.class))).thenReturn(true);
when(storage.addBlock(any(BlockInfo.class))).thenReturn
(DatanodeStorageInfo.AddBlockResult.ADDED);
ucBlock.addStorage(storage);
when(mbc.setLastBlock((BlockInfo) any(), (DatanodeStorageInfo[]) any()))
.thenReturn(ucBlock);
bm.convertLastBlockToUnderConstruction(mbc, 0L);
// Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
    // from QUEUE_HIGHEST_PRIORITY.
// This block remains and should not be skipped over.
chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
}
@Test(timeout = 60000)
public void testupdateNeededReplicationsDoesNotCauseSkippedReplication()
throws IOException {
Namesystem mockNS = mock(Namesystem.class);
when(mockNS.isPopulatingReplQueues()).thenReturn(true);
BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong());
BlockInfo block2 = genBlockInfo(ThreadLocalRandom.current().nextLong());
    // Adding QUEUE_HIGHEST_PRIORITY block
underReplicatedBlocks.add(block1, 0, 1, 1);
    // Adding QUEUE_HIGHEST_PRIORITY block
underReplicatedBlocks.add(block2, 0, 1, 1);
List<List<BlockInfo>> chosenBlocks;
// Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
    // from QUEUE_HIGHEST_PRIORITY.
chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
bm.setReplication((short)0, (short)1, "", block1);
// Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
    // from QUEUE_HIGHEST_PRIORITY.
// This block remains and should not be skipped over.
chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
}
}
| 53,141 | 39.909931 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.junit.Test;
import java.util.Iterator;
public class TestUnderReplicatedBlocks {
@Test(timeout=60000) // 1 min timeout
public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {
Configuration conf = new HdfsConfiguration();
final short REPLICATION_FACTOR = 2;
final String FILE_NAME = "/testFile";
final Path FILE_PATH = new Path(FILE_NAME);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR + 1).build();
try {
// create a file with one block with a replication factor of 2
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
// remove one replica from the blocksMap so block becomes under-replicated
// but the block does not get put into the under-replicated blocks queue
final BlockManager bm = cluster.getNamesystem().getBlockManager();
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
DatanodeDescriptor dn = bm.blocksMap.getStorages(b.getLocalBlock())
.iterator().next().getDatanodeDescriptor();
bm.addToInvalidates(b.getLocalBlock(), dn);
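      // The sleep below presumably gives the datanode time to act on the
      // invalidation before the replica is removed from the blocksMap.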
Thread.sleep(5000);
bm.blocksMap.removeNode(b.getLocalBlock(), dn);
// increment this file's replication factor
FsShell shell = new FsShell(conf);
assertEquals(0, shell.run(new String[]{
"-setrep", "-w", Integer.toString(1+REPLICATION_FACTOR), FILE_NAME}));
} finally {
cluster.shutdown();
}
}
/**
* The test verifies the number of outstanding replication requests for a
* given DN shouldn't exceed the limit set by configuration property
* dfs.namenode.replication.max-streams-hard-limit.
   * The test does the following:
* 1. Create a mini cluster with 2 DNs. Set large heartbeat interval so that
* replication requests won't be picked by any DN right away.
* 2. Create a file with 10 blocks and replication factor 2. Thus each
   *    of the 2 DNs has one replica of each block.
* 3. Add a DN to the cluster for later replication.
* 4. Remove a DN that has data.
* 5. Ask BlockManager to compute the replication work. This will assign
* replication requests to the only DN that has data.
   * 6. Make sure the number of pending replication requests of that DN doesn't
* exceed the limit.
* @throws Exception
*/
@Test(timeout=60000) // 1 min timeout
public void testNumberOfBlocksToBeReplicated() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
// Large value to make sure the pending replication request can stay in
// DatanodeDescriptor.replicateBlocks before test timeout.
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 100);
// Make sure BlockManager can pull all blocks from UnderReplicatedBlocks via
// chooseUnderReplicatedBlocks at once.
conf.setInt(
DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION, 5);
int NUM_OF_BLOCKS = 10;
final short REP_FACTOR = 2;
final String FILE_NAME = "/testFile";
final Path FILE_PATH = new Path(FILE_NAME);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
REP_FACTOR).build();
try {
// create a file with 10 blocks with a replication factor of 2
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, FILE_PATH, NUM_OF_BLOCKS, REP_FACTOR, 1L);
DFSTestUtil.waitReplication(fs, FILE_PATH, REP_FACTOR);
cluster.startDataNodes(conf, 1, true, null, null, null, null);
final BlockManager bm = cluster.getNamesystem().getBlockManager();
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
Iterator<DatanodeStorageInfo> storageInfos =
bm.blocksMap.getStorages(b.getLocalBlock())
.iterator();
DatanodeDescriptor firstDn = storageInfos.next().getDatanodeDescriptor();
DatanodeDescriptor secondDn = storageInfos.next().getDatanodeDescriptor();
bm.getDatanodeManager().removeDatanode(firstDn);
assertEquals(NUM_OF_BLOCKS, bm.getUnderReplicatedNotMissingBlocks());
bm.computeDatanodeWork();
assertTrue("The number of blocks to be replicated should be less than "
+ "or equal to " + bm.replicationStreamsHardLimit,
secondDn.getNumberOfBlocksToBeReplicated()
<= bm.replicationStreamsHardLimit);
} finally {
cluster.shutdown();
}
}
}
| 6,098 | 41.354167 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.log4j.Level;
import org.junit.Test;
public class TestBlocksWithNotEnoughRacks {
public static final Log LOG = LogFactory.getLog(TestBlocksWithNotEnoughRacks.class);
static {
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
}
/*
* Return a configuration object with low timeouts for testing and
* a topology script set (which enables rack awareness).
*/
private Configuration getConf() {
Configuration conf = new HdfsConfiguration();
    // Lower the heartbeat interval so the NN quickly learns of dead
// or decommissioned DNs and the NN issues replication and invalidation
// commands quickly (as replies to heartbeats)
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
// Have the NN ReplicationMonitor compute the replication and
// invalidation commands to send DNs every second.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
// Have the NN check for pending replications every second so it
// quickly schedules additional replicas as they are identified.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
// The DNs report blocks every second.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
// Indicates we have multiple racks
conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "xyz");
return conf;
}
/*
* Creates a block with all datanodes on the same rack, though the block
* is sufficiently replicated. Adds an additional datanode on a new rack.
* The block should be replicated to the new rack.
*/
@Test
public void testSufficientlyReplBlocksUsesNewRack() throws Exception {
Configuration conf = getConf();
final short REPLICATION_FACTOR = 3;
final Path filePath = new Path("/testFile");
// All datanodes are on the same rack
String racks[] = {"/rack1", "/rack1", "/rack1"};
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(racks.length).racks(racks).build();
try {
// Create a file with one block with a replication factor of 3
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);
// Add a new datanode on a different rack
String newRacks[] = {"/rack2"};
cluster.startDataNodes(conf, 1, true, null, newRacks);
cluster.waitActive();
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
} finally {
cluster.shutdown();
}
}
/*
* Like the previous test but the block starts with a single replica,
* and therefore unlike the previous test the block does not start
* off needing replicas.
*/
@Test
public void testSufficientlySingleReplBlockUsesNewRack() throws Exception {
Configuration conf = getConf();
short REPLICATION_FACTOR = 1;
final Path filePath = new Path("/testFile");
String racks[] = {"/rack1", "/rack1", "/rack1", "/rack2"};
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns = cluster.getNameNode().getNamesystem();
try {
// Create a file with one block with a replication factor of 1
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);
REPLICATION_FACTOR = 2;
NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR);
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
} finally {
cluster.shutdown();
}
}
/*
* Creates a block with all datanodes on the same rack. Add additional
* datanodes on a different rack and increase the replication factor,
* making sure there are enough replicas across racks. If the previous
* test passes this one should too, however this test may pass when
* the previous one fails because the replication code is explicitly
* triggered by setting the replication factor.
*/
@Test
public void testUnderReplicatedUsesNewRacks() throws Exception {
Configuration conf = getConf();
short REPLICATION_FACTOR = 3;
final Path filePath = new Path("/testFile");
// All datanodes are on the same rack
String racks[] = {"/rack1", "/rack1", "/rack1", "/rack1", "/rack1"};
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns = cluster.getNameNode().getNamesystem();
try {
// Create a file with one block
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);
// Add new datanodes on a different rack and increase the
// replication factor so the block is underreplicated and make
// sure at least one of the hosts on the new rack is used.
String newRacks[] = {"/rack2", "/rack2"};
cluster.startDataNodes(conf, 2, true, null, newRacks);
REPLICATION_FACTOR = 5;
NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR);
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
} finally {
cluster.shutdown();
}
}
/*
* Test that a block that is re-replicated because one of its replicas
* is found to be corrupt and is re-replicated across racks.
*/
@Test
public void testCorruptBlockRereplicatedAcrossRacks() throws Exception {
Configuration conf = getConf();
short REPLICATION_FACTOR = 2;
int fileLen = 512;
final Path filePath = new Path("/testFile");
// Datanodes are spread across two racks
String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns = cluster.getNameNode().getNamesystem();
try {
// Create a file with one block with a replication factor of 2
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, filePath, fileLen, REPLICATION_FACTOR, 1L);
final String fileContent = DFSTestUtil.readFile(fs, filePath);
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
// Corrupt a replica of the block
int dnToCorrupt = DFSTestUtil.firstDnWithBlock(cluster, b);
assertTrue(cluster.corruptReplica(dnToCorrupt, b));
// Restart the datanode so blocks are re-scanned, and the corrupt
// block is detected.
cluster.restartDataNode(dnToCorrupt);
// Wait for the namenode to notice the corrupt replica
DFSTestUtil.waitCorruptReplicas(fs, ns, filePath, b, 1);
// The rack policy is still respected
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
// Ensure all replicas are valid (the corrupt replica may not
// have been cleaned up yet).
for (int i = 0; i < racks.length; i++) {
String blockContent = cluster.readBlockOnDataNode(i, b);
if (blockContent != null && i != dnToCorrupt) {
assertEquals("Corrupt replica", fileContent, blockContent);
}
}
} finally {
cluster.shutdown();
}
}
/*
* Reduce the replication factor of a file, making sure that the only
* cross rack replica is not removed when deleting replicas.
*/
@Test
public void testReduceReplFactorRespectsRackPolicy() throws Exception {
Configuration conf = getConf();
short REPLICATION_FACTOR = 3;
final Path filePath = new Path("/testFile");
String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns = cluster.getNameNode().getNamesystem();
try {
// Create a file with one block
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
// Decrease the replication factor, make sure the deleted replica
// was not the one that lived on the rack with only one replica,
// ie we should still have 2 racks after reducing the repl factor.
REPLICATION_FACTOR = 2;
NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR);
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
} finally {
cluster.shutdown();
}
}
/*
* Test that when a block is replicated because a replica is lost due
   * to host failure, the rack policy is preserved.
*/
@Test
public void testReplDueToNodeFailRespectsRackPolicy() throws Exception {
Configuration conf = getConf();
short REPLICATION_FACTOR = 3;
final Path filePath = new Path("/testFile");
// Last datanode is on a different rack
String racks[] = {"/rack1", "/rack1", "/rack1", "/rack2", "/rack2"};
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns = cluster.getNameNode().getNamesystem();
final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
try {
// Create a file with one block with a replication factor of 2
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
// Make the last datanode look like it failed to heartbeat by
// calling removeDatanode and stopping it.
ArrayList<DataNode> datanodes = cluster.getDataNodes();
int idx = datanodes.size() - 1;
DataNode dataNode = datanodes.get(idx);
DatanodeID dnId = dataNode.getDatanodeId();
cluster.stopDataNode(idx);
dm.removeDatanode(dnId);
// The block should still have sufficient # replicas, across racks.
// The last node may not have contained a replica, but if it did
// it should have been replicated within the same rack.
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
// Fail the last datanode again, it's also on rack2 so there is
// only 1 rack for all the replicas
datanodes = cluster.getDataNodes();
idx = datanodes.size() - 1;
dataNode = datanodes.get(idx);
dnId = dataNode.getDatanodeId();
cluster.stopDataNode(idx);
dm.removeDatanode(dnId);
// Make sure we have enough live replicas even though we are
// short one rack and therefore need one replica
DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);
} finally {
cluster.shutdown();
}
}
/*
* Test that when the excess replicas of a block are reduced due to
* a node re-joining the cluster the rack policy is not violated.
*/
@Test
public void testReduceReplFactorDueToRejoinRespectsRackPolicy()
throws Exception {
Configuration conf = getConf();
short REPLICATION_FACTOR = 2;
final Path filePath = new Path("/testFile");
// Last datanode is on a different rack
String racks[] = {"/rack1", "/rack1", "/rack2"};
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns = cluster.getNameNode().getNamesystem();
final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
try {
// Create a file with one block
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
// Make the last (cross rack) datanode look like it failed
// to heartbeat by stopping it and calling removeDatanode.
ArrayList<DataNode> datanodes = cluster.getDataNodes();
assertEquals(3, datanodes.size());
DataNode dataNode = datanodes.get(2);
DatanodeID dnId = dataNode.getDatanodeId();
cluster.stopDataNode(2);
dm.removeDatanode(dnId);
// The block gets re-replicated to another datanode so it has a
// sufficient # replicas, but not across racks, so there should
// be 1 rack, and 1 needed replica (even though there are 2 hosts
// available and only 2 replicas required).
DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);
// Start the "failed" datanode, which has a replica so the block is
// now over-replicated and therefore a replica should be removed but
// not on the restarted datanode as that would violate the rack policy.
String rack2[] = {"/rack2"};
cluster.startDataNodes(conf, 1, true, null, rack2);
cluster.waitActive();
// The block now has sufficient # replicas, across racks
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
} finally {
cluster.shutdown();
}
}
/*
* Test that rack policy is still respected when blocks are replicated
* due to node decommissioning.
*/
@Test
public void testNodeDecomissionRespectsRackPolicy() throws Exception {
Configuration conf = getConf();
short REPLICATION_FACTOR = 2;
final Path filePath = new Path("/testFile");
// Configure an excludes file
FileSystem localFileSys = FileSystem.getLocal(conf);
Path workingDir = localFileSys.getWorkingDirectory();
Path dir = new Path(workingDir, "build/test/data/temp/decommission");
Path excludeFile = new Path(dir, "exclude");
Path includeFile = new Path(dir, "include");
assertTrue(localFileSys.mkdirs(dir));
DFSTestUtil.writeFile(localFileSys, excludeFile, "");
DFSTestUtil.writeFile(localFileSys, includeFile, "");
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
// Two blocks and four racks
String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns = cluster.getNameNode().getNamesystem();
try {
// Create a file with one block
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
// Decommission one of the hosts with the block, this should cause
// the block to get replicated to another host on the same rack,
// otherwise the rack policy is violated.
BlockLocation locs[] = fs.getFileBlockLocations(
fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
String name = locs[0].getNames()[0];
DFSTestUtil.writeFile(localFileSys, excludeFile, name);
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
DFSTestUtil.waitForDecommission(fs, name);
// Check the block still has sufficient # replicas across racks
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
} finally {
cluster.shutdown();
}
}
/*
* Test that rack policy is still respected when blocks are replicated
* due to node decommissioning, when the blocks are over-replicated.
*/
@Test
public void testNodeDecomissionWithOverreplicationRespectsRackPolicy()
throws Exception {
Configuration conf = getConf();
short REPLICATION_FACTOR = 5;
final Path filePath = new Path("/testFile");
// Configure an excludes file
FileSystem localFileSys = FileSystem.getLocal(conf);
Path workingDir = localFileSys.getWorkingDirectory();
Path dir = new Path(workingDir, "build/test/data/temp/decommission");
Path excludeFile = new Path(dir, "exclude");
Path includeFile = new Path(dir, "include");
assertTrue(localFileSys.mkdirs(dir));
DFSTestUtil.writeFile(localFileSys, excludeFile, "");
DFSTestUtil.writeFile(localFileSys, includeFile, "");
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
// All hosts are on two racks, only one host on /rack2
String racks[] = {"/rack1", "/rack2", "/rack1", "/rack1", "/rack1"};
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns = cluster.getNameNode().getNamesystem();
try {
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
// Lower the replication factor so the blocks are over replicated
REPLICATION_FACTOR = 2;
fs.setReplication(filePath, REPLICATION_FACTOR);
// Decommission one of the hosts with the block that is not on
      // the lone host on rack2 (if we decommission that host it would
// be impossible to respect the rack policy).
BlockLocation locs[] = fs.getFileBlockLocations(
fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
for (String top : locs[0].getTopologyPaths()) {
if (!top.startsWith("/rack2")) {
String name = top.substring("/rack1".length()+1);
DFSTestUtil.writeFile(localFileSys, excludeFile, name);
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
DFSTestUtil.waitForDecommission(fs, name);
break;
}
}
      // Check the block still has sufficient # replicas across racks,
      // ie we didn't remove the replica on the lone host on /rack2.
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
} finally {
cluster.shutdown();
}
}
}
| 20,586 | 40.928717 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCachedBlocksList.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList;
import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
import org.junit.Assert;
import org.junit.Test;
public class TestCachedBlocksList {
public static final Log LOG = LogFactory.getLog(TestCachedBlocksList.class);
@Test(timeout=60000)
public void testSingleList() {
DatanodeDescriptor dn = new DatanodeDescriptor(
new DatanodeID("127.0.0.1", "localhost", "abcd",
5000, 5001, 5002, 5003));
CachedBlock[] blocks = new CachedBlock[] {
new CachedBlock(0L, (short)1, true),
new CachedBlock(1L, (short)1, true),
new CachedBlock(2L, (short)1, true),
};
// check that lists are empty
Assert.assertTrue("expected pending cached list to start off empty.",
!dn.getPendingCached().iterator().hasNext());
Assert.assertTrue("expected cached list to start off empty.",
!dn.getCached().iterator().hasNext());
Assert.assertTrue("expected pending uncached list to start off empty.",
!dn.getPendingUncached().iterator().hasNext());
// add a block to the back
Assert.assertTrue(dn.getCached().add(blocks[0]));
Assert.assertTrue("expected pending cached list to still be empty.",
!dn.getPendingCached().iterator().hasNext());
Assert.assertEquals("failed to insert blocks[0]", blocks[0],
dn.getCached().iterator().next());
Assert.assertTrue("expected pending uncached list to still be empty.",
!dn.getPendingUncached().iterator().hasNext());
// add another block to the back
Assert.assertTrue(dn.getCached().add(blocks[1]));
Iterator<CachedBlock> iter = dn.getCached().iterator();
Assert.assertEquals(blocks[0], iter.next());
Assert.assertEquals(blocks[1], iter.next());
Assert.assertTrue(!iter.hasNext());
// add a block to the front
Assert.assertTrue(dn.getCached().addFirst(blocks[2]));
iter = dn.getCached().iterator();
Assert.assertEquals(blocks[2], iter.next());
Assert.assertEquals(blocks[0], iter.next());
Assert.assertEquals(blocks[1], iter.next());
Assert.assertTrue(!iter.hasNext());
// remove a block from the middle
Assert.assertTrue(dn.getCached().remove(blocks[0]));
iter = dn.getCached().iterator();
Assert.assertEquals(blocks[2], iter.next());
Assert.assertEquals(blocks[1], iter.next());
Assert.assertTrue(!iter.hasNext());
// remove all blocks
dn.getCached().clear();
Assert.assertTrue("expected cached list to be empty after clear.",
!dn.getPendingCached().iterator().hasNext());
}
private void testAddElementsToList(CachedBlocksList list,
CachedBlock[] blocks) {
Assert.assertTrue("expected list to start off empty.",
!list.iterator().hasNext());
for (CachedBlock block : blocks) {
Assert.assertTrue(list.add(block));
}
}
private void testRemoveElementsFromList(Random r,
CachedBlocksList list, CachedBlock[] blocks) {
int i = 0;
for (Iterator<CachedBlock> iter = list.iterator(); iter.hasNext(); ) {
Assert.assertEquals(blocks[i], iter.next());
i++;
}
if (r.nextBoolean()) {
LOG.info("Removing via iterator");
for (Iterator<CachedBlock> iter = list.iterator(); iter.hasNext() ;) {
iter.next();
iter.remove();
}
} else {
LOG.info("Removing in pseudo-random order");
CachedBlock[] remainingBlocks = Arrays.copyOf(blocks, blocks.length);
for (int removed = 0; removed < remainingBlocks.length; ) {
int toRemove = r.nextInt(remainingBlocks.length);
if (remainingBlocks[toRemove] != null) {
Assert.assertTrue(list.remove(remainingBlocks[toRemove]));
remainingBlocks[toRemove] = null;
removed++;
}
}
}
Assert.assertTrue("expected list to be empty after everything " +
"was removed.", !list.iterator().hasNext());
}
@Test(timeout=60000)
public void testMultipleLists() {
DatanodeDescriptor[] datanodes = new DatanodeDescriptor[] {
new DatanodeDescriptor(
new DatanodeID("127.0.0.1", "localhost", "abcd",
5000, 5001, 5002, 5003)),
new DatanodeDescriptor(
new DatanodeID("127.0.1.1", "localhost", "efgh",
6000, 6001, 6002, 6003)),
};
CachedBlocksList[] lists = new CachedBlocksList[] {
datanodes[0].getPendingCached(),
datanodes[0].getCached(),
datanodes[1].getPendingCached(),
datanodes[1].getCached(),
datanodes[1].getPendingUncached(),
};
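    // Every list below receives the same set of blocks and then has them
    // removed, exercising blocks that are tracked by several per-datanode
    // lists at the same time.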
final int NUM_BLOCKS = 8000;
CachedBlock[] blocks = new CachedBlock[NUM_BLOCKS];
for (int i = 0; i < NUM_BLOCKS; i++) {
blocks[i] = new CachedBlock(i, (short)i, true);
}
Random r = new Random(654);
for (CachedBlocksList list : lists) {
testAddElementsToList(list, blocks);
}
for (CachedBlocksList list : lists) {
testRemoveElementsFromList(r, list, blocks);
}
}
}
| 6,170 | 38.812903 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.ha.TestDNFencing.RandomDeleterPolicy;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;
import com.google.common.collect.Lists;
/**
* Test when RBW block is removed. Invalidation of the corrupted block happens
* and then the under replicated block gets replicated to the datanode.
*/
public class TestRBWBlockInvalidation {
private static final Log LOG = LogFactory.getLog(TestRBWBlockInvalidation.class);
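  /** Count the replicas of the given block as seen by the namesystem's BlockManager. */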
private static NumberReplicas countReplicas(final FSNamesystem namesystem,
ExtendedBlock block) {
final BlockManager blockManager = namesystem.getBlockManager();
return blockManager.countNodes(blockManager.getStoredBlock(
block.getLocalBlock()));
}
/**
* Test when a block's replica is removed from RBW folder in one of the
* datanode, namenode should ask to invalidate that corrupted block and
* schedule replication for one more replica for that under replicated block.
*/
@Test(timeout=600000)
public void testBlockInvalidationWhenRBWReplicaMissedInDN()
throws IOException, InterruptedException {
// This test cannot pass on Windows due to file locking enforcement. It will
// reject the attempt to delete the block file from the RBW folder.
assumeTrue(!Path.WINDOWS);
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 300);
conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
.build();
FSDataOutputStream out = null;
try {
final FSNamesystem namesystem = cluster.getNamesystem();
FileSystem fs = cluster.getFileSystem();
Path testPath = new Path("/tmp/TestRBWBlockInvalidation", "foo1");
out = fs.create(testPath, (short) 2);
out.writeBytes("HDFS-3157: " + testPath);
out.hsync();
cluster.startDataNodes(conf, 1, true, null, null, null);
String bpid = namesystem.getBlockPoolId();
ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, testPath);
Block block = blk.getLocalBlock();
DataNode dn = cluster.getDataNodes().get(0);
// Delete partial block and its meta information from the RBW folder
// of first datanode.
File blockFile = DataNodeTestUtils.getBlockFile(dn, bpid, block);
File metaFile = DataNodeTestUtils.getMetaFile(dn, bpid, block);
assertTrue("Could not delete the block file from the RBW folder",
blockFile.delete());
assertTrue("Could not delete the block meta file from the RBW folder",
metaFile.delete());
out.close();
int liveReplicas = 0;
while (true) {
if ((liveReplicas = countReplicas(namesystem, blk).liveReplicas()) < 2) {
// This confirms we have a corrupt replica
LOG.info("Live Replicas after corruption: " + liveReplicas);
break;
}
Thread.sleep(100);
}
assertEquals("There should be less than 2 replicas in the "
+ "liveReplicasMap", 1, liveReplicas);
while (true) {
if ((liveReplicas =
countReplicas(namesystem, blk).liveReplicas()) > 1) {
//Wait till the live replica count becomes equal to Replication Factor
LOG.info("Live Replicas after Rereplication: " + liveReplicas);
break;
}
Thread.sleep(100);
}
assertEquals("There should be two live replicas", 2, liveReplicas);
while (true) {
Thread.sleep(100);
if (countReplicas(namesystem, blk).corruptReplicas() == 0) {
LOG.info("Corrupt Replicas becomes 0");
break;
}
}
} finally {
if (out != null) {
out.close();
}
cluster.shutdown();
}
}
/**
* Regression test for HDFS-4799, a case where, upon restart, if there
* were RWR replicas with out-of-date genstamps, the NN could accidentally
* delete good replicas instead of the bad replicas.
*/
@Test(timeout=60000)
public void testRWRInvalidation() throws Exception {
Configuration conf = new HdfsConfiguration();
// Set the deletion policy to be randomized rather than the default.
// The default is based on disk space, which isn't controllable
// in the context of the test, whereas a random one is more accurate
// to what is seen in real clusters (nodes have random amounts of free
// space)
conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
RandomDeleterPolicy.class, BlockPlacementPolicy.class);
// Speed up the test a bit with faster heartbeats.
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
// Test with a bunch of separate files, since otherwise the test may
// fail just due to "good luck", even if a bug is present.
List<Path> testPaths = Lists.newArrayList();
for (int i = 0; i < 10; i++) {
testPaths.add(new Path("/test" + i));
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
.build();
try {
List<FSDataOutputStream> streams = Lists.newArrayList();
try {
// Open the test files and write some data to each
for (Path path : testPaths) {
FSDataOutputStream out = cluster.getFileSystem().create(path, (short)2);
streams.add(out);
out.writeBytes("old gs data\n");
out.hflush();
}
// Shutdown one of the nodes in the pipeline
DataNodeProperties oldGenstampNode = cluster.stopDataNode(0);
// Write some more data and flush again. This data will only
// be in the latter genstamp copy of the blocks.
for (int i = 0; i < streams.size(); i++) {
Path path = testPaths.get(i);
FSDataOutputStream out = streams.get(i);
out.writeBytes("new gs data\n");
out.hflush();
// Set replication so that only one node is necessary for this block,
// and close it.
cluster.getFileSystem().setReplication(path, (short)1);
out.close();
}
// Upon restart, there will be two replicas, one with an old genstamp
// and one current copy. This test wants to ensure that the old genstamp
// copy is the one that is deleted.
LOG.info("=========================== restarting cluster");
DataNodeProperties otherNode = cluster.stopDataNode(0);
cluster.restartNameNode();
// Restart the datanode with the corrupt replica first.
cluster.restartDataNode(oldGenstampNode);
cluster.waitActive();
// Then the other node
cluster.restartDataNode(otherNode);
cluster.waitActive();
// Compute and send invalidations, waiting until they're fully processed.
cluster.getNameNode().getNamesystem().getBlockManager()
.computeInvalidateWork(2);
cluster.triggerHeartbeats();
HATestUtil.waitForDNDeletions(cluster);
cluster.triggerDeletionReports();
// Make sure we can still read the blocks.
for (Path path : testPaths) {
String ret = DFSTestUtil.readFile(cluster.getFileSystem(), path);
assertEquals("old gs data\n" + "new gs data\n", ret);
}
} finally {
IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
}
} finally {
cluster.shutdown();
}
}
}
| 9,589 | 38.142857 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestHDFSConcat {
public static final Log LOG = LogFactory.getLog(TestHDFSConcat.class);
private static final short REPL_FACTOR = 2;
private MiniDFSCluster cluster;
private NamenodeProtocols nn;
private DistributedFileSystem dfs;
private static final long blockSize = 512;
private static final Configuration conf;
static {
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
}
@Before
public void startUpCluster() throws IOException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
assertNotNull("Failed Cluster Creation", cluster);
cluster.waitClusterUp();
dfs = cluster.getFileSystem();
assertNotNull("Failed to get FileSystem", dfs);
nn = cluster.getNameNodeRpc();
assertNotNull("Failed to get NameNode", nn);
}
@After
public void shutDownCluster() throws IOException {
if(dfs != null) {
dfs.close();
}
if(cluster != null) {
cluster.shutdownDataNodes();
cluster.shutdown();
}
}
/**
* Concatenates 10 files into one
* Verifies the final size, deletion of the file, number of blocks
* @throws IOException
*/
@Test
public void testConcat() throws IOException, InterruptedException {
final int numFiles = 10;
long fileLen = blockSize*3;
HdfsFileStatus fStatus;
FSDataInputStream stm;
String trg = "/trg";
Path trgPath = new Path(trg);
DFSTestUtil.createFile(dfs, trgPath, fileLen, REPL_FACTOR, 1);
fStatus = nn.getFileInfo(trg);
long trgLen = fStatus.getLen();
long trgBlocks = nn.getBlockLocations(trg, 0, trgLen).locatedBlockCount();
Path [] files = new Path[numFiles];
byte [] [] bytes = new byte [numFiles][(int)fileLen];
LocatedBlocks [] lblocks = new LocatedBlocks[numFiles];
long [] lens = new long [numFiles];
int i;
for(i=0; i<files.length; i++) {
files[i] = new Path("/file"+i);
Path path = files[i];
System.out.println("Creating file " + path);
DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
fStatus = nn.getFileInfo(path.toUri().getPath());
lens[i] = fStatus.getLen();
assertEquals(trgLen, lens[i]); // file of the same length.
lblocks[i] = nn.getBlockLocations(path.toUri().getPath(), 0, lens[i]);
//read the file
stm = dfs.open(path);
stm.readFully(0, bytes[i]);
//bytes[i][10] = 10;
stm.close();
}
// check permissions -try the operation with the "wrong" user
final UserGroupInformation user1 = UserGroupInformation.createUserForTesting(
"theDoctor", new String[] { "tardis" });
DistributedFileSystem hdfs =
(DistributedFileSystem)DFSTestUtil.getFileSystemAs(user1, conf);
try {
hdfs.concat(trgPath, files);
fail("Permission exception expected");
} catch (IOException ie) {
System.out.println("Got expected exception for permissions:"
+ ie.getLocalizedMessage());
// expected
}
// check count update
ContentSummary cBefore = dfs.getContentSummary(trgPath.getParent());
// now concatenate
dfs.concat(trgPath, files);
// verify count
ContentSummary cAfter = dfs.getContentSummary(trgPath.getParent());
assertEquals(cBefore.getFileCount(), cAfter.getFileCount()+files.length);
// verify other stuff
long totalLen = trgLen;
long totalBlocks = trgBlocks;
for(i=0; i<files.length; i++) {
totalLen += lens[i];
totalBlocks += lblocks[i].locatedBlockCount();
}
System.out.println("total len=" + totalLen + "; totalBlocks=" + totalBlocks);
fStatus = nn.getFileInfo(trg);
trgLen = fStatus.getLen(); // new length
// read the resulting file
stm = dfs.open(trgPath);
byte[] byteFileConcat = new byte[(int)trgLen];
stm.readFully(0, byteFileConcat);
stm.close();
trgBlocks = nn.getBlockLocations(trg, 0, trgLen).locatedBlockCount();
//verifications
// 1. number of blocks
assertEquals(trgBlocks, totalBlocks);
// 2. file lengths
assertEquals(trgLen, totalLen);
// 3. removal of the src file
for(Path p: files) {
fStatus = nn.getFileInfo(p.toUri().getPath());
assertNull("File " + p + " still exists", fStatus); // file shouldn't exist
      // try to create a file with the same name
DFSTestUtil.createFile(dfs, p, fileLen, REPL_FACTOR, 1);
}
// 4. content
checkFileContent(byteFileConcat, bytes);
    // add a small file (less than a block)
Path smallFile = new Path("/sfile");
int sFileLen = 10;
DFSTestUtil.createFile(dfs, smallFile, sFileLen, REPL_FACTOR, 1);
dfs.concat(trgPath, new Path [] {smallFile});
fStatus = nn.getFileInfo(trg);
trgLen = fStatus.getLen(); // new length
// check number of blocks
trgBlocks = nn.getBlockLocations(trg, 0, trgLen).locatedBlockCount();
assertEquals(trgBlocks, totalBlocks+1);
// and length
assertEquals(trgLen, totalLen+sFileLen);
}
/**
* Test that the concat operation is properly persisted in the
* edit log, and properly replayed on restart.
*/
@Test
public void testConcatInEditLog() throws Exception {
final Path TEST_DIR = new Path("/testConcatInEditLog");
final long FILE_LEN = blockSize;
// 1. Concat some files
Path[] srcFiles = new Path[3];
for (int i = 0; i < srcFiles.length; i++) {
Path path = new Path(TEST_DIR, "src-" + i);
DFSTestUtil.createFile(dfs, path, FILE_LEN, REPL_FACTOR, 1);
srcFiles[i] = path;
}
Path targetFile = new Path(TEST_DIR, "target");
DFSTestUtil.createFile(dfs, targetFile, FILE_LEN, REPL_FACTOR, 1);
dfs.concat(targetFile, srcFiles);
// 2. Verify the concat operation basically worked, and record
// file status.
assertTrue(dfs.exists(targetFile));
FileStatus origStatus = dfs.getFileStatus(targetFile);
// 3. Restart NN to force replay from edit log
cluster.restartNameNode(true);
// 4. Verify concat operation was replayed correctly and file status
// did not change.
assertTrue(dfs.exists(targetFile));
assertFalse(dfs.exists(srcFiles[0]));
FileStatus statusAfterRestart = dfs.getFileStatus(targetFile);
assertEquals(origStatus.getModificationTime(),
statusAfterRestart.getModificationTime());
}
// compare content
private void checkFileContent(byte[] concat, byte[][] bytes ) {
int idx=0;
boolean mismatch = false;
for(byte [] bb: bytes) {
for(byte b: bb) {
if(b != concat[idx++]) {
mismatch=true;
break;
}
}
if(mismatch)
break;
}
assertFalse("File content of concatenated file is different", mismatch);
}
// test case when final block is not of a full length
@Test
public void testConcatNotCompleteBlock() throws IOException {
long trgFileLen = blockSize*3;
long srcFileLen = blockSize*3+20; // block at the end - not full
// create first file
String name1="/trg", name2="/src";
Path filePath1 = new Path(name1);
DFSTestUtil.createFile(dfs, filePath1, trgFileLen, REPL_FACTOR, 1);
HdfsFileStatus fStatus = nn.getFileInfo(name1);
long fileLen = fStatus.getLen();
assertEquals(fileLen, trgFileLen);
//read the file
FSDataInputStream stm = dfs.open(filePath1);
byte[] byteFile1 = new byte[(int)trgFileLen];
stm.readFully(0, byteFile1);
stm.close();
LocatedBlocks lb1 = nn.getBlockLocations(name1, 0, trgFileLen);
Path filePath2 = new Path(name2);
DFSTestUtil.createFile(dfs, filePath2, srcFileLen, REPL_FACTOR, 1);
fStatus = nn.getFileInfo(name2);
fileLen = fStatus.getLen();
assertEquals(srcFileLen, fileLen);
// read the file
stm = dfs.open(filePath2);
byte[] byteFile2 = new byte[(int)srcFileLen];
stm.readFully(0, byteFile2);
stm.close();
LocatedBlocks lb2 = nn.getBlockLocations(name2, 0, srcFileLen);
System.out.println("trg len="+trgFileLen+"; src len="+srcFileLen);
// move the blocks
dfs.concat(filePath1, new Path [] {filePath2});
long totalLen = trgFileLen + srcFileLen;
fStatus = nn.getFileInfo(name1);
fileLen = fStatus.getLen();
// read the resulting file
stm = dfs.open(filePath1);
byte[] byteFileConcat = new byte[(int)fileLen];
stm.readFully(0, byteFileConcat);
stm.close();
LocatedBlocks lbConcat = nn.getBlockLocations(name1, 0, fileLen);
//verifications
// 1. number of blocks
assertEquals(lbConcat.locatedBlockCount(),
lb1.locatedBlockCount() + lb2.locatedBlockCount());
// 2. file lengths
System.out.println("file1 len="+fileLen+"; total len="+totalLen);
assertEquals(fileLen, totalLen);
// 3. removal of the src file
fStatus = nn.getFileInfo(name2);
assertNull("File "+name2+ "still exists", fStatus); // file shouldn't exist
// 4. content
checkFileContent(byteFileConcat, new byte [] [] {byteFile1, byteFile2});
}
/**
* test illegal args cases
*/
@Test
public void testIllegalArg() throws IOException {
long fileLen = blockSize*3;
Path parentDir = new Path ("/parentTrg");
assertTrue(dfs.mkdirs(parentDir));
Path trg = new Path(parentDir, "trg");
DFSTestUtil.createFile(dfs, trg, fileLen, REPL_FACTOR, 1);
// must be in the same dir
{
// create first file
Path dir1 = new Path ("/dir1");
assertTrue(dfs.mkdirs(dir1));
Path src = new Path(dir1, "src");
DFSTestUtil.createFile(dfs, src, fileLen, REPL_FACTOR, 1);
try {
dfs.concat(trg, new Path [] {src});
fail("didn't fail for src and trg in different directories");
} catch (Exception e) {
// expected
}
}
// non existing file
try {
dfs.concat(trg, new Path [] {new Path("test1/a")}); // non existing file
fail("didn't fail with invalid arguments");
} catch (Exception e) {
//expected
}
// empty arg list
try {
dfs.concat(trg, new Path [] {}); // empty array
fail("didn't fail with invalid arguments");
} catch (Exception e) {
      // expected
}
// the source file's preferred block size cannot be greater than the target
{
final Path src1 = new Path(parentDir, "src1");
DFSTestUtil.createFile(dfs, src1, fileLen, REPL_FACTOR, 0L);
final Path src2 = new Path(parentDir, "src2");
// create a file whose preferred block size is greater than the target
DFSTestUtil.createFile(dfs, src2, 1024, fileLen,
dfs.getDefaultBlockSize(trg) * 2, REPL_FACTOR, 0L);
try {
dfs.concat(trg, new Path[] {src1, src2});
fail("didn't fail for src with greater preferred block size");
} catch (Exception e) {
GenericTestUtils.assertExceptionContains("preferred block size", e);
}
}
}
/**
* make sure we update the quota correctly after concat
*/
@Test
public void testConcatWithQuotaDecrease() throws IOException {
    final short srcRepl = 3; // note this is different from REPL_FACTOR
final int srcNum = 10;
final Path foo = new Path("/foo");
final Path[] srcs = new Path[srcNum];
final Path target = new Path(foo, "target");
DFSTestUtil.createFile(dfs, target, blockSize, REPL_FACTOR, 0L);
dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
for (int i = 0; i < srcNum; i++) {
srcs[i] = new Path(foo, "src" + i);
DFSTestUtil.createFile(dfs, srcs[i], blockSize * 2, srcRepl, 0L);
}
ContentSummary summary = dfs.getContentSummary(foo);
Assert.assertEquals(11, summary.getFileCount());
Assert.assertEquals(blockSize * REPL_FACTOR +
blockSize * 2 * srcRepl * srcNum, summary.getSpaceConsumed());
dfs.concat(target, srcs);
summary = dfs.getContentSummary(foo);
Assert.assertEquals(1, summary.getFileCount());
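    // After concat the former src blocks take on the target's replication factor
    // (REPL_FACTOR), so the space consumed decreases.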
Assert.assertEquals(
blockSize * REPL_FACTOR + blockSize * 2 * REPL_FACTOR * srcNum,
summary.getSpaceConsumed());
}
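  /**
   * Make sure concat is rejected when it would push the directory over its
   * disk-space quota, and succeeds once the quota is raised.
   */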
@Test
public void testConcatWithQuotaIncrease() throws IOException {
final short repl = 3;
final int srcNum = 10;
final Path foo = new Path("/foo");
final Path bar = new Path(foo, "bar");
final Path[] srcs = new Path[srcNum];
final Path target = new Path(bar, "target");
DFSTestUtil.createFile(dfs, target, blockSize, repl, 0L);
final long dsQuota = blockSize * repl + blockSize * srcNum * REPL_FACTOR;
dfs.setQuota(foo, Long.MAX_VALUE - 1, dsQuota);
for (int i = 0; i < srcNum; i++) {
srcs[i] = new Path(bar, "src" + i);
DFSTestUtil.createFile(dfs, srcs[i], blockSize, REPL_FACTOR, 0L);
}
ContentSummary summary = dfs.getContentSummary(bar);
Assert.assertEquals(11, summary.getFileCount());
Assert.assertEquals(dsQuota, summary.getSpaceConsumed());
try {
dfs.concat(target, srcs);
fail("QuotaExceededException expected");
} catch (RemoteException e) {
Assert.assertTrue(
e.unwrapRemoteException() instanceof QuotaExceededException);
}
dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
dfs.concat(target, srcs);
summary = dfs.getContentSummary(bar);
Assert.assertEquals(1, summary.getFileCount());
Assert.assertEquals(blockSize * repl * (srcNum + 1),
summary.getSpaceConsumed());
}
}
| 15,757 | 31.829167 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.net.URI;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ExitUtil.ExitException;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestClusterId {
private static final Log LOG = LogFactory.getLog(TestClusterId.class);
File hdfsDir;
Configuration config;
private String getClusterId(Configuration config) throws IOException {
    // read the cluster id back from the storage directory's VERSION file.
Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(config);
List<URI> editsToFormat = FSNamesystem.getNamespaceEditsDirs(config);
FSImage fsImage = new FSImage(config, dirsToFormat, editsToFormat);
Iterator<StorageDirectory> sdit =
fsImage.getStorage().dirIterator(NNStorage.NameNodeDirType.IMAGE);
StorageDirectory sd = sdit.next();
Properties props = Storage.readPropertiesFile(sd.getVersionFile());
String cid = props.getProperty("clusterID");
LOG.info("successfully formated : sd="+sd.getCurrentDir() + ";cid="+cid);
return cid;
}
@Before
public void setUp() throws IOException {
ExitUtil.disableSystemExit();
String baseDir = PathUtils.getTestDirName(getClass());
hdfsDir = new File(baseDir, "dfs/name");
if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
throw new IOException("Could not delete test directory '" + hdfsDir + "'");
}
LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());
// as some tests might change these values we reset them to defaults before
// every test
StartupOption.FORMAT.setForceFormat(false);
StartupOption.FORMAT.setInteractiveFormat(true);
config = new Configuration();
config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
}
@After
public void tearDown() throws IOException {
if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
throw new IOException("Could not tearDown test directory '" + hdfsDir
+ "'");
}
}
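  /**
   * Format with no cluster id, with an explicit cluster id, and again with an
   * empty cluster id, verifying the resulting cluster ids each time.
   */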
@Test
public void testFormatClusterIdOption() throws IOException {
// 1. should format without cluster id
//StartupOption.FORMAT.setClusterId("");
NameNode.format(config);
    // see if cluster id is not empty.
String cid = getClusterId(config);
assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")) );
// 2. successful format with given clusterid
StartupOption.FORMAT.setClusterId("mycluster");
NameNode.format(config);
    // see if cluster id matches the given clusterid.
cid = getClusterId(config);
assertTrue("ClusterId didn't match", cid.equals("mycluster"));
// 3. format without any clusterid again. It should generate new
//clusterid.
StartupOption.FORMAT.setClusterId("");
NameNode.format(config);
String newCid = getClusterId(config);
assertFalse("ClusterId should not be the same", newCid.equals(cid));
}
/**
* Test namenode format with -format option. Format should succeed.
*
* @throws IOException
*/
@Test
public void testFormat() throws IOException {
String[] argv = { "-format" };
try {
NameNode.createNameNode(argv, config);
fail("createNameNode() did not call System.exit()");
} catch (ExitException e) {
assertEquals("Format should have succeeded", 0, e.status);
}
String cid = getClusterId(config);
assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
}
/**
* Test namenode format with -format option when an empty name directory
* exists. Format should succeed.
*
* @throws IOException
*/
@Test
public void testFormatWithEmptyDir() throws IOException {
if (!hdfsDir.mkdirs()) {
fail("Failed to create dir " + hdfsDir.getPath());
}
String[] argv = { "-format" };
try {
NameNode.createNameNode(argv, config);
fail("createNameNode() did not call System.exit()");
} catch (ExitException e) {
assertEquals("Format should have succeeded", 0, e.status);
}
String cid = getClusterId(config);
assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
}
/**
* Test namenode format with -format -force options when name directory
* exists. Format should succeed.
*
* @throws IOException
*/
@Test
public void testFormatWithForce() throws IOException {
if (!hdfsDir.mkdirs()) {
fail("Failed to create dir " + hdfsDir.getPath());
}
String[] argv = { "-format", "-force" };
try {
NameNode.createNameNode(argv, config);
fail("createNameNode() did not call System.exit()");
} catch (ExitException e) {
assertEquals("Format should have succeeded", 0, e.status);
}
String cid = getClusterId(config);
assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
}
/**
* Test namenode format with -format -force -clusterid option when name
* directory exists. Format should succeed.
*
* @throws IOException
*/
@Test
public void testFormatWithForceAndClusterId() throws IOException {
if (!hdfsDir.mkdirs()) {
fail("Failed to create dir " + hdfsDir.getPath());
}
String myId = "testFormatWithForceAndClusterId";
String[] argv = { "-format", "-force", "-clusterid", myId };
try {
NameNode.createNameNode(argv, config);
fail("createNameNode() did not call System.exit()");
} catch (ExitException e) {
assertEquals("Format should have succeeded", 0, e.status);
}
String cId = getClusterId(config);
assertEquals("ClusterIds do not match", myId, cId);
}
/**
* Test namenode format with -clusterid -force option. Format command should
* fail as no cluster id was provided.
*
* @throws IOException
*/
@Test
public void testFormatWithInvalidClusterIdOption() throws IOException {
String[] argv = { "-format", "-clusterid", "-force" };
PrintStream origErr = System.err;
ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream stdErr = new PrintStream(baos);
System.setErr(stdErr);
NameNode.createNameNode(argv, config);
// Check if usage is printed
assertTrue(baos.toString("UTF-8").contains("Usage: hdfs namenode"));
System.setErr(origErr);
    // check that the version file does not exist.
File version = new File(hdfsDir, "current/VERSION");
assertFalse("Check version should not exist", version.exists());
}
/**
* Test namenode format with -format -clusterid options. Format should fail
   * as no clusterid was provided.
*
* @throws IOException
*/
@Test
public void testFormatWithNoClusterIdOption() throws IOException {
String[] argv = { "-format", "-clusterid" };
PrintStream origErr = System.err;
ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream stdErr = new PrintStream(baos);
System.setErr(stdErr);
NameNode.createNameNode(argv, config);
// Check if usage is printed
assertTrue(baos.toString("UTF-8").contains("Usage: hdfs namenode"));
System.setErr(origErr);
    // check that the version file does not exist.
File version = new File(hdfsDir, "current/VERSION");
assertFalse("Check version should not exist", version.exists());
}
/**
* Test namenode format with -format -clusterid and empty clusterid. Format
   * should fail as no valid id was provided.
*
* @throws IOException
*/
@Test
public void testFormatWithEmptyClusterIdOption() throws IOException {
String[] argv = { "-format", "-clusterid", "" };
PrintStream origErr = System.err;
ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream stdErr = new PrintStream(baos);
System.setErr(stdErr);
NameNode.createNameNode(argv, config);
// Check if usage is printed
assertTrue(baos.toString("UTF-8").contains("Usage: hdfs namenode"));
System.setErr(origErr);
    // check that the version file does not exist.
File version = new File(hdfsDir, "current/VERSION");
assertFalse("Check version should not exist", version.exists());
}
/**
* Test namenode format with -format -nonInteractive options when a non empty
* name directory exists. Format should not succeed.
*
* @throws IOException
*/
@Test
public void testFormatWithNonInteractive() throws IOException {
// we check for a non empty dir, so create a child path
File data = new File(hdfsDir, "file");
if (!data.mkdirs()) {
fail("Failed to create dir " + data.getPath());
}
String[] argv = { "-format", "-nonInteractive" };
try {
NameNode.createNameNode(argv, config);
fail("createNameNode() did not call System.exit()");
} catch (ExitException e) {
assertEquals("Format should have been aborted with exit code 1", 1,
e.status);
}
    // check that the version file does not exist.
File version = new File(hdfsDir, "current/VERSION");
assertFalse("Check version should not exist", version.exists());
}
/**
* Test namenode format with -format -nonInteractive options when name
* directory does not exist. Format should succeed.
*
* @throws IOException
*/
@Test
  public void testFormatWithNonInteractiveNameDirDoesNotExist()
throws IOException {
String[] argv = { "-format", "-nonInteractive" };
try {
NameNode.createNameNode(argv, config);
fail("createNameNode() did not call System.exit()");
} catch (ExitException e) {
assertEquals("Format should have succeeded", 0, e.status);
}
String cid = getClusterId(config);
assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
}
/**
   * Test namenode format with -format -nonInteractive -force options. Format
* should succeed.
*
* @throws IOException
*/
@Test
public void testFormatWithNonInteractiveAndForce() throws IOException {
if (!hdfsDir.mkdirs()) {
fail("Failed to create dir " + hdfsDir.getPath());
}
String[] argv = { "-format", "-nonInteractive", "-force" };
try {
NameNode.createNameNode(argv, config);
fail("createNameNode() did not call System.exit()");
} catch (ExitException e) {
assertEquals("Format should have succeeded", 0, e.status);
}
String cid = getClusterId(config);
assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
}
/**
* Test namenode format with -format option when a non empty name directory
* exists. Enter Y when prompted and the format should succeed.
*
* @throws IOException
* @throws InterruptedException
*/
@Test
public void testFormatWithoutForceEnterYes() throws IOException,
InterruptedException {
// we check for a non empty dir, so create a child path
File data = new File(hdfsDir, "file");
if (!data.mkdirs()) {
fail("Failed to create dir " + data.getPath());
}
// capture the input stream
InputStream origIn = System.in;
ByteArrayInputStream bins = new ByteArrayInputStream("Y\n".getBytes());
System.setIn(bins);
String[] argv = { "-format" };
try {
NameNode.createNameNode(argv, config);
fail("createNameNode() did not call System.exit()");
} catch (ExitException e) {
assertEquals("Format should have succeeded", 0, e.status);
}
System.setIn(origIn);
String cid = getClusterId(config);
assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
}
/**
* Test namenode format with -format option when a non empty name directory
* exists. Enter N when prompted and format should be aborted.
*
* @throws IOException
* @throws InterruptedException
*/
@Test
public void testFormatWithoutForceEnterNo() throws IOException,
InterruptedException {
// we check for a non empty dir, so create a child path
File data = new File(hdfsDir, "file");
if (!data.mkdirs()) {
fail("Failed to create dir " + data.getPath());
}
// capture the input stream
InputStream origIn = System.in;
ByteArrayInputStream bins = new ByteArrayInputStream("N\n".getBytes());
System.setIn(bins);
String[] argv = { "-format" };
try {
NameNode.createNameNode(argv, config);
fail("createNameNode() did not call System.exit()");
} catch (ExitException e) {
assertEquals("Format should not have succeeded", 1, e.status);
}
System.setIn(origIn);
    // check that the version file does not exist.
File version = new File(hdfsDir, "current/VERSION");
assertFalse("Check version should not exist", version.exists());
}
}
| 14,421 | 31.120267 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.net.URI;
import java.util.Random;
import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ConfigUtil;
import org.apache.hadoop.fs.viewfs.ViewFileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
/**
* Test fsck with multiple NameNodes
*/
public class TestFsckWithMultipleNameNodes {
static final Log LOG = LogFactory.getLog(TestFsckWithMultipleNameNodes.class);
{
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}
private static final String FILE_NAME = "/tmp.txt";
private static final Path FILE_PATH = new Path(FILE_NAME);
private static final Random RANDOM = new Random();
static {
TestBalancer.initTestSetup();
}
/** Common objects used in various methods. */
private static class Suite {
final MiniDFSCluster cluster;
final ClientProtocol[] clients;
final short replication;
Suite(MiniDFSCluster cluster, final int nNameNodes, final int nDataNodes)
throws IOException {
this.cluster = cluster;
clients = new ClientProtocol[nNameNodes];
for(int i = 0; i < nNameNodes; i++) {
clients[i] = cluster.getNameNode(i).getRpcServer();
}
replication = (short)Math.max(1, nDataNodes - 1);
}
    /** Create a file of length <code>len</code> using the file system of the <code>index</code>-th namenode. */
private void createFile(int index, long len
) throws IOException, InterruptedException, TimeoutException {
final FileSystem fs = cluster.getFileSystem(index);
DFSTestUtil.createFile(fs, FILE_PATH, len, replication, RANDOM.nextLong());
DFSTestUtil.waitReplication(fs, FILE_PATH, replication);
}
}
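  /** Build a test configuration with a short access-time precision and block-report interval. */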
private static Configuration createConf() {
final Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1L);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
return conf;
}
private void runTest(final int nNameNodes, final int nDataNodes,
Configuration conf) throws Exception {
LOG.info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);
LOG.info("RUN_TEST -1");
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
.numDataNodes(nDataNodes)
.build();
LOG.info("RUN_TEST 0");
DFSTestUtil.setFederatedConfiguration(cluster, conf);
try {
cluster.waitActive();
LOG.info("RUN_TEST 1");
final Suite s = new Suite(cluster, nNameNodes, nDataNodes);
for(int i = 0; i < nNameNodes; i++) {
s.createFile(i, 1024);
}
LOG.info("RUN_TEST 2");
final String[] urls = new String[nNameNodes];
for(int i = 0; i < urls.length; i++) {
urls[i] = cluster.getFileSystem(i).getUri() + FILE_NAME;
LOG.info("urls[" + i + "]=" + urls[i]);
final String result = TestFsck.runFsck(conf, 0, false, urls[i]);
LOG.info("result=" + result);
Assert.assertTrue(result.contains("Status: HEALTHY"));
}
// Test viewfs
//
LOG.info("RUN_TEST 3");
final String[] vurls = new String[nNameNodes];
for (int i = 0; i < vurls.length; i++) {
String link = "/mount/nn_" + i + FILE_NAME;
ConfigUtil.addLink(conf, link, new URI(urls[i]));
vurls[i] = "viewfs:" + link;
}
for(int i = 0; i < vurls.length; i++) {
LOG.info("vurls[" + i + "]=" + vurls[i]);
final String result = TestFsck.runFsck(conf, 0, false, vurls[i]);
LOG.info("result=" + result);
Assert.assertTrue(result.contains("Status: HEALTHY"));
}
} finally {
cluster.shutdown();
}
LOG.info("RUN_TEST 6");
}
  /**
   * Run fsck against each namenode of the federated cluster,
   * both directly and through viewfs mount points.
   */
@Test
public void testFsck() throws Exception {
final Configuration conf = createConf();
runTest(3, 1, conf);
}
}
| 5,432 | 33.169811 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathComponents.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertTrue;
import java.util.Arrays;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSUtil;
import org.junit.Test;
import com.google.common.base.Charsets;
/**
 * Verify that INode#getPathComponents and DFSUtil#bytes2byteArray split
 * path strings into the same components.
 */
public class TestPathComponents {
@Test
public void testBytes2ByteArray() throws Exception {
testString("/");
testString("/file");
testString("/directory/");
testString("//");
testString("/dir//file");
testString("/dir/dir1//");
}
public void testString(String str) throws Exception {
String pathString = str;
byte[][] oldPathComponents = INode.getPathComponents(pathString);
byte[][] newPathComponents =
DFSUtil.bytes2byteArray(pathString.getBytes(Charsets.UTF_8),
(byte) Path.SEPARATOR_CHAR);
if (oldPathComponents[0] == null) {
assertTrue(oldPathComponents[0] == newPathComponents[0]);
} else {
assertTrue("Path components do not match for " + pathString,
Arrays.deepEquals(oldPathComponents, newPathComponents));
}
}
}
| 1,947 | 31.466667 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Time;
import org.junit.Test;
import org.mockito.Mockito;
import com.google.common.collect.ImmutableList;
public class TestINodeFile {
// Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
static {
FileSystem.enableSymlinks();
}
public static final Log LOG = LogFactory.getLog(TestINodeFile.class);
static final short BLOCKBITS = 48;
static final long BLKSIZE_MAXVALUE = ~(0xffffL << BLOCKBITS);
private static final PermissionStatus perm = new PermissionStatus(
"userName", null, FsPermission.getDefault());
private short replication;
private long preferredBlockSize = 1024;
INodeFile createINodeFile(short replication, long preferredBlockSize) {
return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
null, replication, preferredBlockSize, (byte)0);
}
private static INodeFile createINodeFile(byte storagePolicyID) {
return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
null, (short)3, 1024L, storagePolicyID);
}
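  /** A storage policy ID set at construction time should be returned unchanged. */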
@Test
public void testStoragePolicyID () {
for(byte i = 0; i < 16; i++) {
final INodeFile f = createINodeFile(i);
assertEquals(i, f.getStoragePolicyID());
}
}
@Test(expected=IllegalArgumentException.class)
public void testStoragePolicyIdBelowLowerBound () throws IllegalArgumentException {
createINodeFile((byte)-1);
}
@Test(expected=IllegalArgumentException.class)
public void testStoragePolicyIdAboveUpperBound () throws IllegalArgumentException {
createINodeFile((byte)16);
}
/**
* Test for the Replication value. Sets a value and checks if it was set
   * correctly.
*/
@Test
public void testReplication () {
replication = 3;
preferredBlockSize = 128*1024*1024;
INodeFile inf = createINodeFile(replication, preferredBlockSize);
assertEquals("True has to be returned in this case", replication,
inf.getFileReplication());
}
/**
* IllegalArgumentException is expected for setting below lower bound
* for Replication.
* @throws IllegalArgumentException as the result
*/
@Test(expected=IllegalArgumentException.class)
public void testReplicationBelowLowerBound ()
throws IllegalArgumentException {
replication = -1;
preferredBlockSize = 128*1024*1024;
createINodeFile(replication, preferredBlockSize);
}
/**
* Test for the PreferredBlockSize value. Sets a value and checks if it was
   * set correctly.
*/
@Test
public void testPreferredBlockSize () {
replication = 3;
preferredBlockSize = 128*1024*1024;
INodeFile inf = createINodeFile(replication, preferredBlockSize);
assertEquals("True has to be returned in this case", preferredBlockSize,
inf.getPreferredBlockSize());
}
@Test
public void testPreferredBlockSizeUpperBound () {
replication = 3;
preferredBlockSize = BLKSIZE_MAXVALUE;
INodeFile inf = createINodeFile(replication, preferredBlockSize);
assertEquals("True has to be returned in this case", BLKSIZE_MAXVALUE,
inf.getPreferredBlockSize());
}
/**
* IllegalArgumentException is expected for setting below lower bound
* for PreferredBlockSize.
* @throws IllegalArgumentException as the result
*/
@Test(expected=IllegalArgumentException.class)
public void testPreferredBlockSizeBelowLowerBound ()
throws IllegalArgumentException {
replication = 3;
preferredBlockSize = -1;
createINodeFile(replication, preferredBlockSize);
}
/**
* IllegalArgumentException is expected for setting above upper bound
* for PreferredBlockSize.
* @throws IllegalArgumentException as the result
*/
@Test(expected=IllegalArgumentException.class)
public void testPreferredBlockSizeAboveUpperBound ()
throws IllegalArgumentException {
replication = 3;
preferredBlockSize = BLKSIZE_MAXVALUE+1;
createINodeFile(replication, preferredBlockSize);
}
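  /** Build a small INode tree by hand and check getFullPathName() at each level. */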
@Test
public void testGetFullPathName() {
replication = 3;
preferredBlockSize = 128*1024*1024;
INodeFile inf = createINodeFile(replication, preferredBlockSize);
inf.setLocalName(DFSUtil.string2Bytes("f"));
INodeDirectory root = new INodeDirectory(HdfsConstants.GRANDFATHER_INODE_ID,
INodeDirectory.ROOT_NAME, perm, 0L);
INodeDirectory dir = new INodeDirectory(HdfsConstants.GRANDFATHER_INODE_ID,
DFSUtil.string2Bytes("d"), perm, 0L);
assertEquals("f", inf.getFullPathName());
dir.addChild(inf);
assertEquals("d"+Path.SEPARATOR+"f", inf.getFullPathName());
root.addChild(dir);
assertEquals(Path.SEPARATOR+"d"+Path.SEPARATOR+"f", inf.getFullPathName());
assertEquals(Path.SEPARATOR+"d", dir.getFullPathName());
assertEquals(Path.SEPARATOR, root.getFullPathName());
}
/**
* FSDirectory#unprotectedSetQuota creates a new INodeDirectoryWithQuota to
* replace the original INodeDirectory. Before HDFS-4243, the parent field of
* all the children INodes of the target INodeDirectory is not changed to
* point to the new INodeDirectoryWithQuota. This testcase tests this
* scenario.
*/
@Test
public void testGetFullPathNameAfterSetQuota() throws Exception {
long fileLen = 1024;
replication = 3;
Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(replication).build();
cluster.waitActive();
FSNamesystem fsn = cluster.getNamesystem();
FSDirectory fsdir = fsn.getFSDirectory();
DistributedFileSystem dfs = cluster.getFileSystem();
// Create a file for test
final Path dir = new Path("/dir");
final Path file = new Path(dir, "file");
DFSTestUtil.createFile(dfs, file, fileLen, replication, 0L);
// Check the full path name of the INode associating with the file
INode fnode = fsdir.getINode(file.toString());
assertEquals(file.toString(), fnode.getFullPathName());
// Call FSDirectory#unprotectedSetQuota which calls
// INodeDirectory#replaceChild
dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10);
INodeDirectory dirNode = getDir(fsdir, dir);
assertEquals(dir.toString(), dirNode.getFullPathName());
assertTrue(dirNode.isWithQuota());
final Path newDir = new Path("/newdir");
final Path newFile = new Path(newDir, "file");
// Also rename dir
dfs.rename(dir, newDir, Options.Rename.OVERWRITE);
// /dir/file now should be renamed to /newdir/file
fnode = fsdir.getINode(newFile.toString());
// getFullPathName can return correct result only if the parent field of
// child node is set correctly
assertEquals(newFile.toString(), fnode.getFullPathName());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
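  /** concatBlocks() should move the blocks of the appended files into the target file. */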
@Test
public void testConcatBlocks() {
INodeFile origFile = createINodeFiles(1, "origfile")[0];
assertEquals("Number of blocks didn't match", origFile.numBlocks(), 1L);
INodeFile[] appendFiles = createINodeFiles(4, "appendfile");
origFile.concatBlocks(appendFiles);
assertEquals("Number of blocks didn't match", origFile.numBlocks(), 5L);
}
/**
* Creates the required number of files with one block each
* @param nCount Number of INodes to create
* @return Array of INode files
*/
private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
if(nCount <= 0)
return new INodeFile[1];
replication = 3;
preferredBlockSize = 128 * 1024 * 1024;
INodeFile[] iNodes = new INodeFile[nCount];
for (int i = 0; i < nCount; i++) {
iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
preferredBlockSize, (byte)0);
iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
BlockInfo newblock = new BlockInfoContiguous(replication);
iNodes[i].addBlock(newblock);
}
return iNodes;
}
/**
* Test for the static {@link INodeFile#valueOf(INode, String)}
* and {@link INodeFileUnderConstruction#valueOf(INode, String)} methods.
* @throws IOException
*/
@Test
public void testValueOf () throws IOException {
final String path = "/testValueOf";
final short replication = 3;
{//cast from null
final INode from = null;
//cast to INodeFile, should fail
try {
INodeFile.valueOf(from, path);
fail();
} catch(FileNotFoundException fnfe) {
assertTrue(fnfe.getMessage().contains("File does not exist"));
}
//cast to INodeDirectory, should fail
try {
INodeDirectory.valueOf(from, path);
fail();
} catch(FileNotFoundException e) {
assertTrue(e.getMessage().contains("Directory does not exist"));
}
}
{//cast from INodeFile
final INode from = createINodeFile(replication, preferredBlockSize);
      //cast to INodeFile, should succeed
final INodeFile f = INodeFile.valueOf(from, path);
assertTrue(f == from);
//cast to INodeDirectory, should fail
try {
INodeDirectory.valueOf(from, path);
fail();
} catch(PathIsNotDirectoryException e) {
// Expected
}
}
{//cast from INodeFileUnderConstruction
final INode from = new INodeFile(
HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication,
1024L, (byte)0);
from.asFile().toUnderConstruction("client", "machine");
      //cast to INodeFile, should succeed
final INodeFile f = INodeFile.valueOf(from, path);
assertTrue(f == from);
//cast to INodeDirectory, should fail
try {
INodeDirectory.valueOf(from, path);
fail();
} catch(PathIsNotDirectoryException expected) {
// expected
}
}
{//cast from INodeDirectory
final INode from = new INodeDirectory(HdfsConstants.GRANDFATHER_INODE_ID, null,
perm, 0L);
//cast to INodeFile, should fail
try {
INodeFile.valueOf(from, path);
fail();
} catch(FileNotFoundException fnfe) {
assertTrue(fnfe.getMessage().contains("Path is not a file"));
}
      //cast to INodeDirectory, should succeed
final INodeDirectory d = INodeDirectory.valueOf(from, path);
assertTrue(d == from);
}
}
/**
* This test verifies inode ID counter and inode map functionality.
*/
@Test
public void testInodeId() throws IOException {
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FSNamesystem fsn = cluster.getNamesystem();
long lastId = fsn.dir.getLastInodeId();
// Ensure root has the correct inode ID
// Last inode ID should be root inode ID and inode map size should be 1
int inodeCount = 1;
long expectedLastInodeId = INodeId.ROOT_INODE_ID;
assertEquals(fsn.dir.rootDir.getId(), INodeId.ROOT_INODE_ID);
assertEquals(expectedLastInodeId, lastId);
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
// Create a directory
// Last inode ID and inode map size should increase by 1
FileSystem fs = cluster.getFileSystem();
Path path = new Path("/test1");
assertTrue(fs.mkdirs(path));
assertEquals(++expectedLastInodeId, fsn.dir.getLastInodeId());
assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
// Create a file
// Last inode ID and inode map size should increase by 1
NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
DFSTestUtil.createFile(fs, new Path("/test1/file"), 1024, (short) 1, 0);
assertEquals(++expectedLastInodeId, fsn.dir.getLastInodeId());
assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
// Ensure right inode ID is returned in file status
HdfsFileStatus fileStatus = nnrpc.getFileInfo("/test1/file");
assertEquals(expectedLastInodeId, fileStatus.getFileId());
// Rename a directory
// Last inode ID and inode map size should not change
Path renamedPath = new Path("/test2");
assertTrue(fs.rename(path, renamedPath));
assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
// Delete test2/file and test2 and ensure inode map size decreases
assertTrue(fs.delete(renamedPath, true));
inodeCount -= 2;
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
// Create and concat /test/file1 /test/file2
// Create /test1/file1 and /test1/file2
String file1 = "/test1/file1";
String file2 = "/test1/file2";
DFSTestUtil.createFile(fs, new Path(file1), 512, (short) 1, 0);
DFSTestUtil.createFile(fs, new Path(file2), 512, (short) 1, 0);
inodeCount += 3; // test1, file1 and file2 are created
expectedLastInodeId += 3;
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
// Concat the /test1/file1 /test1/file2 into /test1/file2
nnrpc.concat(file2, new String[] {file1});
      inodeCount--; // file1 is merged into file2, so file1's inode is removed
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
assertTrue(fs.delete(new Path("/test1"), true));
      inodeCount -= 2; // test1 and file2 are deleted
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
// Make sure editlog is loaded correctly
cluster.restartNameNode();
cluster.waitActive();
fsn = cluster.getNamesystem();
assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
// Create two inodes test2 and test2/file2
DFSTestUtil.createFile(fs, new Path("/test2/file2"), 1024, (short) 1, 0);
expectedLastInodeId += 2;
inodeCount += 2;
assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
// create /test3, and /test3/file.
// /test3/file is a file under construction
FSDataOutputStream outStream = fs.create(new Path("/test3/file"));
assertTrue(outStream != null);
expectedLastInodeId += 2;
inodeCount += 2;
assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
// Apply editlogs to fsimage, ensure inodeUnderConstruction is handled
fsn.enterSafeMode(false);
fsn.saveNamespace();
fsn.leaveSafeMode();
outStream.close();
// The lastInodeId in fsimage should remain the same after reboot
cluster.restartNameNode();
cluster.waitActive();
fsn = cluster.getNamesystem();
assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
assertEquals(inodeCount, fsn.dir.getInodeMapSize());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test(timeout=120000)
public void testWriteToDeletedFile() throws IOException {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
Path path = new Path("/test1");
assertTrue(fs.mkdirs(path));
int size = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
byte[] data = new byte[size];
// Create one file
Path filePath = new Path("/test1/file");
FSDataOutputStream fos = fs.create(filePath);
// Delete the file
fs.delete(filePath, false);
// Add new block should fail since /test1/file has been deleted.
try {
fos.write(data, 0, data.length);
// make sure addBlock() request gets to NN immediately
fos.hflush();
fail("Write should fail after delete");
} catch (Exception e) {
/* Ignore */
} finally {
cluster.shutdown();
}
}
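  // The helper below builds an inode-id based path of the form
  //   /.reserved/.inodes/<inodeId>/<remainingPath>
  // For example (with a hypothetical inode id of 16386 used only for
  // illustration), getInodePath(16386, "test1") yields
  // "/.reserved/.inodes/16386/test1", which the NameNode resolves to the same
  // file as its regular path.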
private Path getInodePath(long inodeId, String remainingPath) {
StringBuilder b = new StringBuilder();
b.append(Path.SEPARATOR).append(FSDirectory.DOT_RESERVED_STRING)
.append(Path.SEPARATOR).append(FSDirectory.DOT_INODES_STRING)
.append(Path.SEPARATOR).append(inodeId).append(Path.SEPARATOR)
.append(remainingPath);
Path p = new Path(b.toString());
LOG.info("Inode path is " + p);
return p;
}
/**
* Tests for addressing files using /.reserved/.inodes/<inodeID> in file system
* operations.
*/
@Test
public void testInodeIdBasedPaths() throws Exception {
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
// FileSystem#mkdirs "/testInodeIdBasedPaths"
Path baseDir = getInodePath(INodeId.ROOT_INODE_ID, "testInodeIdBasedPaths");
Path baseDirRegPath = new Path("/testInodeIdBasedPaths");
fs.mkdirs(baseDir);
fs.exists(baseDir);
long baseDirFileId = nnRpc.getFileInfo(baseDir.toString()).getFileId();
// FileSystem#create file and FileSystem#close
Path testFileInodePath = getInodePath(baseDirFileId, "test1");
Path testFileRegularPath = new Path(baseDir, "test1");
final int testFileBlockSize = 1024;
FileSystemTestHelper.createFile(fs, testFileInodePath, 1, testFileBlockSize);
assertTrue(fs.exists(testFileInodePath));
// FileSystem#setPermission
FsPermission perm = new FsPermission((short)0666);
fs.setPermission(testFileInodePath, perm);
// FileSystem#getFileStatus and FileSystem#getPermission
FileStatus fileStatus = fs.getFileStatus(testFileInodePath);
assertEquals(perm, fileStatus.getPermission());
// FileSystem#setOwner
fs.setOwner(testFileInodePath, fileStatus.getOwner(), fileStatus.getGroup());
// FileSystem#setTimes
fs.setTimes(testFileInodePath, 0, 0);
fileStatus = fs.getFileStatus(testFileInodePath);
assertEquals(0, fileStatus.getModificationTime());
assertEquals(0, fileStatus.getAccessTime());
// FileSystem#setReplication
fs.setReplication(testFileInodePath, (short)3);
fileStatus = fs.getFileStatus(testFileInodePath);
assertEquals(3, fileStatus.getReplication());
fs.setReplication(testFileInodePath, (short)1);
// ClientProtocol#getPreferredBlockSize
assertEquals(testFileBlockSize,
nnRpc.getPreferredBlockSize(testFileInodePath.toString()));
/*
* HDFS-6749 added missing calls to FSDirectory.resolvePath in the
* following four methods. The calls below ensure that
* /.reserved/.inodes paths work properly. No need to check return
* values as these methods are tested elsewhere.
*/
{
fs.isFileClosed(testFileInodePath);
fs.getAclStatus(testFileInodePath);
fs.getXAttrs(testFileInodePath);
fs.listXAttrs(testFileInodePath);
fs.access(testFileInodePath, FsAction.READ_WRITE);
}
// symbolic link related tests
// Reserved path is not allowed as a target
String invalidTarget = new Path(baseDir, "invalidTarget").toString();
String link = new Path(baseDir, "link").toString();
testInvalidSymlinkTarget(nnRpc, invalidTarget, link);
// Test creating a link using reserved inode path
String validTarget = "/validtarget";
testValidSymlinkTarget(nnRpc, validTarget, link);
// FileSystem#append
fs.append(testFileInodePath);
// DistributedFileSystem#recoverLease
fs.recoverLease(testFileInodePath);
// Namenode#getBlockLocations
LocatedBlocks l1 = nnRpc.getBlockLocations(testFileInodePath.toString(),
0, Long.MAX_VALUE);
LocatedBlocks l2 = nnRpc.getBlockLocations(testFileRegularPath.toString(),
0, Long.MAX_VALUE);
checkEquals(l1, l2);
// FileSystem#rename - both the variants
Path renameDst = getInodePath(baseDirFileId, "test2");
fileStatus = fs.getFileStatus(testFileInodePath);
      // Rename variant 1: rename and rename back
fs.rename(testFileInodePath, renameDst);
fs.rename(renameDst, testFileInodePath);
assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
      // Rename variant 2: rename and rename back
fs.rename(testFileInodePath, renameDst, Rename.OVERWRITE);
fs.rename(renameDst, testFileInodePath, Rename.OVERWRITE);
assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
// FileSystem#getContentSummary
assertEquals(fs.getContentSummary(testFileRegularPath).toString(),
fs.getContentSummary(testFileInodePath).toString());
// FileSystem#listFiles
checkEquals(fs.listFiles(baseDirRegPath, false),
fs.listFiles(baseDir, false));
// FileSystem#delete
fs.delete(testFileInodePath, true);
assertFalse(fs.exists(testFileInodePath));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
private void testInvalidSymlinkTarget(NamenodeProtocols nnRpc,
String invalidTarget, String link) throws IOException {
try {
FsPermission perm = FsPermission.createImmutable((short)0755);
nnRpc.createSymlink(invalidTarget, link, perm, false);
fail("Symbolic link creation of target " + invalidTarget + " should fail");
} catch (InvalidPathException expected) {
// Expected
}
}
private void testValidSymlinkTarget(NamenodeProtocols nnRpc, String target,
String link) throws IOException {
FsPermission perm = FsPermission.createImmutable((short)0755);
nnRpc.createSymlink(target, link, perm, false);
assertEquals(target, nnRpc.getLinkTarget(link));
}
private static void checkEquals(LocatedBlocks l1, LocatedBlocks l2) {
List<LocatedBlock> list1 = l1.getLocatedBlocks();
List<LocatedBlock> list2 = l2.getLocatedBlocks();
assertEquals(list1.size(), list2.size());
for (int i = 0; i < list1.size(); i++) {
LocatedBlock b1 = list1.get(i);
LocatedBlock b2 = list2.get(i);
assertEquals(b1.getBlock(), b2.getBlock());
assertEquals(b1.getBlockSize(), b2.getBlockSize());
}
}
private static void checkEquals(RemoteIterator<LocatedFileStatus> i1,
RemoteIterator<LocatedFileStatus> i2) throws IOException {
while (i1.hasNext()) {
assertTrue(i2.hasNext());
// Compare all the fields but the path name, which is relative
// to the original path from listFiles.
LocatedFileStatus l1 = i1.next();
LocatedFileStatus l2 = i2.next();
assertEquals(l1.getAccessTime(), l2.getAccessTime());
assertEquals(l1.getBlockSize(), l2.getBlockSize());
assertEquals(l1.getGroup(), l2.getGroup());
assertEquals(l1.getLen(), l2.getLen());
assertEquals(l1.getModificationTime(), l2.getModificationTime());
assertEquals(l1.getOwner(), l2.getOwner());
assertEquals(l1.getPermission(), l2.getPermission());
assertEquals(l1.getReplication(), l2.getReplication());
}
assertFalse(i2.hasNext());
}
/**
* Check /.reserved path is reserved and cannot be created.
*/
@Test
public void testReservedFileNames() throws IOException {
Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
// First start a cluster with reserved file names check turned off
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
// Creation of directory or file with reserved path names is disallowed
ensureReservedFileNamesCannotBeCreated(fs, "/.reserved", false);
ensureReservedFileNamesCannotBeCreated(fs, "/.reserved", false);
Path reservedPath = new Path("/.reserved");
// Loading of fsimage or editlog with /.reserved directory should fail
// Mkdir "/.reserved reserved path with reserved path check turned off
FSDirectory.CHECK_RESERVED_FILE_NAMES = false;
fs.mkdirs(reservedPath);
assertTrue(fs.isDirectory(reservedPath));
ensureReservedFileNamesCannotBeLoaded(cluster);
// Loading of fsimage or editlog with /.reserved file should fail
// Create file "/.reserved reserved path with reserved path check turned off
FSDirectory.CHECK_RESERVED_FILE_NAMES = false;
ensureClusterRestartSucceeds(cluster);
fs.delete(reservedPath, true);
DFSTestUtil.createFile(fs, reservedPath, 10, (short)1, 0L);
      assertFalse(fs.isDirectory(reservedPath));
ensureReservedFileNamesCannotBeLoaded(cluster);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
private void ensureReservedFileNamesCannotBeCreated(FileSystem fs, String name,
boolean isDir) {
// Creation of directory or file with reserved path names is disallowed
Path reservedPath = new Path(name);
try {
if (isDir) {
fs.mkdirs(reservedPath);
} else {
DFSTestUtil.createFile(fs, reservedPath, 10, (short) 1, 0L);
}
fail((isDir ? "mkdir" : "create file") + " should be disallowed");
} catch (Exception expected) {
// ignored
}
}
private void ensureReservedFileNamesCannotBeLoaded(MiniDFSCluster cluster)
throws IOException {
// Turn on reserved file name checking. Loading of edits should fail
FSDirectory.CHECK_RESERVED_FILE_NAMES = true;
ensureClusterRestartFails(cluster);
// Turn off reserved file name checking and successfully load edits
FSDirectory.CHECK_RESERVED_FILE_NAMES = false;
ensureClusterRestartSucceeds(cluster);
// Turn on reserved file name checking. Loading of fsimage should fail
FSDirectory.CHECK_RESERVED_FILE_NAMES = true;
ensureClusterRestartFails(cluster);
}
private void ensureClusterRestartFails(MiniDFSCluster cluster) {
try {
cluster.restartNameNode();
fail("Cluster should not have successfully started");
} catch (Exception expected) {
LOG.info("Expected exception thrown " + expected);
}
assertFalse(cluster.isClusterUp());
}
private void ensureClusterRestartSucceeds(MiniDFSCluster cluster)
throws IOException {
cluster.restartNameNode();
cluster.waitActive();
assertTrue(cluster.isClusterUp());
}
/**
* For a given path, build a tree of INodes and return the leaf node.
*/
private INode createTreeOfInodes(String path) throws QuotaExceededException {
byte[][] components = INode.getPathComponents(path);
FsPermission perm = FsPermission.createImmutable((short)0755);
PermissionStatus permstatus = PermissionStatus.createImmutable("", "", perm);
long id = 0;
INodeDirectory prev = new INodeDirectory(++id, new byte[0], permstatus, 0);
INodeDirectory dir = null;
for (byte[] component : components) {
if (component.length == 0) {
continue;
}
System.out.println("Adding component " + DFSUtil.bytes2String(component));
dir = new INodeDirectory(++id, component, permstatus, 0);
prev.addChild(dir, false, Snapshot.CURRENT_STATE_ID);
prev = dir;
}
return dir; // Last Inode in the chain
}
/**
* Test for {@link FSDirectory#getPathComponents(INode)}
*/
@Test
public void testGetPathFromInode() throws QuotaExceededException {
String path = "/a/b/c";
INode inode = createTreeOfInodes(path);
byte[][] expected = INode.getPathComponents(path);
byte[][] actual = FSDirectory.getPathComponents(inode);
DFSTestUtil.checkComponentsEquals(expected, actual);
}
/**
* Tests for {@link FSDirectory#resolvePath(String, byte[][], FSDirectory)}
*/
@Test
public void testInodePath() throws IOException {
// For a non .inodes path the regular components are returned
String path = "/a/b/c";
INode inode = createTreeOfInodes(path);
    // Mock FSDirectory so that any inode id lookup returns the inode for "c" in /a/b/c
FSDirectory fsd = Mockito.mock(FSDirectory.class);
Mockito.doReturn(inode).when(fsd).getInode(Mockito.anyLong());
// Null components
assertEquals("/test", FSDirectory.resolvePath("/test", null, fsd));
// Tests for FSDirectory#resolvePath()
// Non inode regular path
byte[][] components = INode.getPathComponents(path);
String resolvedPath = FSDirectory.resolvePath(path, components, fsd);
assertEquals(path, resolvedPath);
// Inode path with no trailing separator
components = INode.getPathComponents("/.reserved/.inodes/1");
resolvedPath = FSDirectory.resolvePath(path, components, fsd);
assertEquals(path, resolvedPath);
// Inode path with trailing separator
components = INode.getPathComponents("/.reserved/.inodes/1/");
assertEquals(path, resolvedPath);
// Inode relative path
components = INode.getPathComponents("/.reserved/.inodes/1/d/e/f");
resolvedPath = FSDirectory.resolvePath(path, components, fsd);
assertEquals("/a/b/c/d/e/f", resolvedPath);
// A path with just .inodes returns the path as is
String testPath = "/.reserved/.inodes";
components = INode.getPathComponents(testPath);
resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
assertEquals(testPath, resolvedPath);
// Root inode path
testPath = "/.reserved/.inodes/" + INodeId.ROOT_INODE_ID;
components = INode.getPathComponents(testPath);
resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
assertEquals("/", resolvedPath);
// An invalid inode path should remain unresolved
testPath = "/.invalid/.inodes/1";
components = INode.getPathComponents(testPath);
resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
assertEquals(testPath, resolvedPath);
// Test path with nonexistent(deleted or wrong id) inode
Mockito.doReturn(null).when(fsd).getInode(Mockito.anyLong());
testPath = "/.reserved/.inodes/1234";
components = INode.getPathComponents(testPath);
try {
String realPath = FSDirectory.resolvePath(testPath, components, fsd);
fail("Path should not be resolved:" + realPath);
} catch (IOException e) {
assertTrue(e instanceof FileNotFoundException);
}
}
private static INodeDirectory getDir(final FSDirectory fsdir, final Path dir)
throws IOException {
final String dirStr = dir.toString();
return INodeDirectory.valueOf(fsdir.getINode(dirStr), dirStr);
}
@Test
public void testDotdotInodePath() throws Exception {
final Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
DFSClient client = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem hdfs = cluster.getFileSystem();
final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
final Path dir = new Path("/dir");
hdfs.mkdirs(dir);
long dirId = fsdir.getINode(dir.toString()).getId();
long parentId = fsdir.getINode("/").getId();
String testPath = "/.reserved/.inodes/" + dirId + "/..";
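      // ".." in an inode path is resolved against the referenced inode, so
      // /.reserved/.inodes/<dirId>/.. should resolve to /dir's parent (the root)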
client = new DFSClient(NameNode.getAddress(conf), conf);
HdfsFileStatus status = client.getFileInfo(testPath);
assertTrue(parentId == status.getFileId());
// Test root's parent is still root
testPath = "/.reserved/.inodes/" + parentId + "/..";
status = client.getFileInfo(testPath);
assertTrue(parentId == status.getFileId());
} finally {
IOUtils.cleanup(LOG, client);
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testLocationLimitInListingOps() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 9); // 3 blocks * 3 replicas
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
final DistributedFileSystem hdfs = cluster.getFileSystem();
ArrayList<String> source = new ArrayList<String>();
// tmp1 holds files with 3 blocks, 3 replicas
// tmp2 holds files with 3 blocks, 1 replica
hdfs.mkdirs(new Path("/tmp1"));
hdfs.mkdirs(new Path("/tmp2"));
source.add("f1");
source.add("f2");
int numEntries = source.size();
for (int j=0;j<numEntries;j++) {
DFSTestUtil.createFile(hdfs, new Path("/tmp1/"+source.get(j)), 4096,
3*1024-100, 1024, (short) 3, 0);
}
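      // With location information requested, the listing limit counts block
      // locations rather than entries (dfs.ls.limit = 9 here): a file with
      // 3 blocks x 3 replicas fills a whole batch, so /tmp1 is returned one
      // entry at a time, while the 1-replica files in /tmp2 come back 3 per batch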
byte[] start = HdfsFileStatus.EMPTY_NAME;
for (int j=0;j<numEntries;j++) {
DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp1",
start, true);
assertTrue(dl.getPartialListing().length == 1);
for (int i=0;i<dl.getPartialListing().length; i++) {
source.remove(dl.getPartialListing()[i].getLocalName());
}
start = dl.getLastName();
}
// Verify we have listed all entries in the directory.
assertTrue(source.size() == 0);
// Now create 6 files, each with 3 locations. Should take 2 iterations of 3
source.add("f1");
source.add("f2");
source.add("f3");
source.add("f4");
source.add("f5");
source.add("f6");
numEntries = source.size();
for (int j=0;j<numEntries;j++) {
DFSTestUtil.createFile(hdfs, new Path("/tmp2/"+source.get(j)), 4096,
3*1024-100, 1024, (short) 1, 0);
}
start = HdfsFileStatus.EMPTY_NAME;
for (int j=0;j<numEntries/3;j++) {
DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp2",
start, true);
assertTrue(dl.getPartialListing().length == 3);
for (int i=0;i<dl.getPartialListing().length; i++) {
source.remove(dl.getPartialListing()[i].getLocalName());
}
start = dl.getLastName();
}
// Verify we have listed all entries in tmp2.
assertTrue(source.size() == 0);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testFilesInGetListingOps() throws Exception {
final Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem hdfs = cluster.getFileSystem();
final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
hdfs.mkdirs(new Path("/tmp"));
DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);
DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp",
HdfsFileStatus.EMPTY_NAME, false);
assertTrue(dl.getPartialListing().length == 3);
String f2 = new String("f2");
dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
assertTrue(dl.getPartialListing().length == 1);
INode f2INode = fsdir.getINode("/tmp/f2");
String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
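      // The startAfter token may also be given as a raw /.reserved/.inodes path;
      // it is resolved to the corresponding file before the listing continues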
dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(),
false);
assertTrue(dl.getPartialListing().length == 1);
// Test the deleted startAfter file
hdfs.delete(new Path("/tmp/f2"), false);
try {
dl = cluster.getNameNodeRpc().getListing("/tmp",
f2InodePath.getBytes(), false);
fail("Didn't get exception for the deleted startAfter token.");
} catch (IOException e) {
assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testFileUnderConstruction() {
replication = 3;
final INodeFile file = new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null,
perm, 0L, 0L, null, replication, 1024L, (byte)0);
assertFalse(file.isUnderConstruction());
final String clientName = "client";
final String clientMachine = "machine";
file.toUnderConstruction(clientName, clientMachine);
assertTrue(file.isUnderConstruction());
FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
assertEquals(clientName, uc.getClientName());
assertEquals(clientMachine, uc.getClientMachine());
file.toCompleteFile(Time.now());
assertFalse(file.isUnderConstruction());
}
@Test
public void testXAttrFeature() {
replication = 3;
preferredBlockSize = 128*1024*1024;
INodeFile inf = createINodeFile(replication, preferredBlockSize);
ImmutableList.Builder<XAttr> builder = new ImmutableList.Builder<XAttr>();
XAttr xAttr = new XAttr.Builder().setNameSpace(XAttr.NameSpace.USER).
setName("a1").setValue(new byte[]{0x31, 0x32, 0x33}).build();
builder.add(xAttr);
XAttrFeature f = new XAttrFeature(builder.build());
inf.addXAttrFeature(f);
XAttrFeature f1 = inf.getXAttrFeature();
assertEquals(xAttr, f1.getXAttrs().get(0));
inf.removeXAttrFeature();
f1 = inf.getXAttrFeature();
assertEquals(f1, null);
}
@Test
public void testClearBlocks() {
INodeFile toBeCleared = createINodeFiles(1, "toBeCleared")[0];
assertEquals(1, toBeCleared.getBlocks().length);
toBeCleared.clearBlocks();
assertNull(toBeCleared.getBlocks());
}
}
| 41,516 | 36.234978 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.util.Time;
/**
 * OfflineEditsViewerHelper is a helper class for TestOfflineEditsViewer;
 * it performs NN operations that generate all op codes.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class OfflineEditsViewerHelper {
private static final Log LOG =
LogFactory.getLog(OfflineEditsViewerHelper.class);
final long blockSize = 512;
MiniDFSCluster cluster = null;
final Configuration config = new Configuration();
/**
* Generates edits with all op codes and returns the edits filename
*/
public String generateEdits() throws IOException {
CheckpointSignature signature = runOperations();
return getEditsFilename(signature);
}
/**
* Get edits filename
*
* @return edits file name for cluster
*/
private String getEditsFilename(CheckpointSignature sig) throws IOException {
FSImage image = cluster.getNameNode().getFSImage();
// it was set up to only have ONE StorageDirectory
Iterator<StorageDirectory> it
= image.getStorage().dirIterator(NameNodeDirType.EDITS);
StorageDirectory sd = it.next();
File ret = NNStorage.getFinalizedEditsFile(
sd, 1, sig.curSegmentTxId - 1);
assert ret.exists() : "expected " + ret + " exists";
return ret.getAbsolutePath();
}
/**
* Sets up a MiniDFSCluster, configures it to create one edits file,
* starts DelegationTokenSecretManager (to get security op codes)
*
* @param dfsDir DFS directory (where to setup MiniDFS cluster)
*/
public void startCluster(String dfsDir) throws IOException {
// same as manageDfsDirs but only one edits file instead of two
config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
Util.fileAsURI(new File(dfsDir, "name")).toString());
config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
Util.fileAsURI(new File(dfsDir, "namesecondary1")).toString());
    // blocksize for concat (file size must be a multiple of the blocksize)
config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
// for security to work (fake JobTracker user)
config.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
"RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
config.setBoolean(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
config.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster =
new MiniDFSCluster.Builder(config).manageNameDfsDirs(false).build();
cluster.waitClusterUp();
}
/**
* Shutdown the cluster
*/
public void shutdownCluster() throws IOException {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Run file operations to create edits for all op codes
* to be tested.
*
   * The following op codes are deprecated and therefore not tested:
*
* OP_DATANODE_ADD ( 5)
* OP_DATANODE_REMOVE ( 6)
* OP_SET_NS_QUOTA (11)
* OP_CLEAR_NS_QUOTA (12)
*/
private CheckpointSignature runOperations() throws IOException {
LOG.info("Creating edits by performing fs operations");
    // no explicit check: if something is wrong an exception is thrown, which is what we want
DistributedFileSystem dfs = cluster.getFileSystem();
DFSTestUtil.runOperations(cluster, dfs, cluster.getConfiguration(0),
dfs.getDefaultBlockSize(), 0);
// OP_ROLLING_UPGRADE_START
cluster.getNamesystem().getEditLog().logStartRollingUpgrade(Time.now());
// OP_ROLLING_UPGRADE_FINALIZE
cluster.getNamesystem().getEditLog().logFinalizeRollingUpgrade(Time.now());
// Force a roll so we get an OP_END_LOG_SEGMENT txn
return cluster.getNameNodeRpc().rollEditLog();
}
}
| 5,319 | 36.464789 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
/**
* Test for {@link NameCache} class
*/
public class TestNameCache {
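  // NameCache promotes a name to its internal dictionary once it has been put()
  // at least useThreshold times before initialized() is called. After
  // initialization, as exercised below, put() returns the canonical cached
  // instance for promoted names and null for everything else.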
@Test
public void testDictionary() throws Exception {
// Create dictionary with useThreshold 2
NameCache<String> cache =
new NameCache<String>(2);
String[] matching = {"part1", "part10000000", "fileabc", "abc", "filepart"};
String[] notMatching = {"spart1", "apart", "abcd", "def"};
for (String s : matching) {
// Add useThreshold times so the names are promoted to dictionary
cache.put(s);
assertTrue(s == cache.put(s));
}
for (String s : notMatching) {
// Add < useThreshold times so the names are not promoted to dictionary
cache.put(s);
}
// Mark dictionary as initialized
cache.initialized();
for (String s : matching) {
verifyNameReuse(cache, s, true);
}
// Check dictionary size
assertEquals(matching.length, cache.size());
for (String s : notMatching) {
verifyNameReuse(cache, s, false);
}
cache.reset();
cache.initialized();
for (String s : matching) {
verifyNameReuse(cache, s, false);
}
for (String s : notMatching) {
verifyNameReuse(cache, s, false);
}
}
private void verifyNameReuse(NameCache<String> cache, String s, boolean reused) {
cache.put(s);
int lookupCount = cache.getLookupCount();
if (reused) {
// Dictionary returns non null internal value
assertNotNull(cache.put(s));
// Successful lookup increments lookup count
assertEquals(lookupCount + 1, cache.getLookupCount());
} else {
// Dictionary returns null - since name is not in the dictionary
assertNull(cache.put(s));
// Lookup count remains the same
assertEquals(lookupCount, cache.getLookupCount());
}
}
}
| 2,874 | 30.944444 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeXAttr.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.junit.Assert;
import org.junit.Test;
/**
* Tests NameNode interaction for all XAttr APIs.
* This test suite covers restarting NN, saving new checkpoint,
 * and also includes tests of xattrs for symlinks.
*/
public class TestNameNodeXAttr extends FSXAttrBaseTest {
private static final Path linkParent = new Path("/symdir1");
private static final Path targetParent = new Path("/symdir2");
private static final Path link = new Path(linkParent, "link");
private static final Path target = new Path(targetParent, "target");
@Test(timeout = 120000)
public void testXAttrSymlinks() throws Exception {
fs.mkdirs(linkParent);
fs.mkdirs(targetParent);
    DFSTestUtil.createFile(fs, target, 1024, (short)3, 0xBEEFL);
fs.createSymlink(target, link, false);
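    // XAttrs set or removed through the symlink are applied to the target (and
    // vice versa), since xattr operations resolve the link before acting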
fs.setXAttr(target, name1, value1);
fs.setXAttr(target, name2, value2);
Map<String, byte[]> xattrs = fs.getXAttrs(link);
Assert.assertEquals(xattrs.size(), 2);
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
fs.setXAttr(link, name3, null);
xattrs = fs.getXAttrs(target);
Assert.assertEquals(xattrs.size(), 3);
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
fs.removeXAttr(link, name1);
xattrs = fs.getXAttrs(target);
Assert.assertEquals(xattrs.size(), 2);
Assert.assertArrayEquals(value2, xattrs.get(name2));
Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
fs.removeXAttr(target, name3);
xattrs = fs.getXAttrs(link);
Assert.assertEquals(xattrs.size(), 1);
Assert.assertArrayEquals(value2, xattrs.get(name2));
fs.delete(linkParent, true);
fs.delete(targetParent, true);
}
}
| 2,829 | 36.236842 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMalformedURLs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import static org.junit.Assert.assertNotEquals;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestMalformedURLs {
private MiniDFSCluster cluster;
Configuration config;
@Before
public void setUp() throws Exception {
Configuration.addDefaultResource("hdfs-site.malformed.xml");
config = new Configuration();
}
@Test
public void testTryStartingCluster() throws Exception {
// if we are able to start the cluster, it means
// that we were able to read the configuration
// correctly.
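    // The malformed resource is expected to carry surrounding whitespace in the
    // HTTP address value, so the raw and trimmed values differ.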
assertNotEquals(config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY),
config.getTrimmed(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
cluster = new MiniDFSCluster.Builder(config).build();
cluster.waitActive();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
}
| 1,907 | 30.8 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySummary.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.namenode.StoragePolicySummary.StorageTypeAllocation;
import org.junit.Assert;
import org.junit.Test;
public class TestStoragePolicySummary {
private Map<String, Long> convertToStringMap(StoragePolicySummary sts) {
LinkedHashMap<String, Long> actualOutput = new LinkedHashMap<>();
for (Map.Entry<StorageTypeAllocation, Long> entry:
StoragePolicySummary.sortByComparator(sts.storageComboCounts)) {
actualOutput.put(entry.getKey().toString(), entry.getValue());
}
return actualOutput;
}
@Test
public void testMultipleHots() {
BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
BlockStoragePolicy hot = bsps.getPolicy("HOT");
sts.add(new StorageType[]{StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,
StorageType.DISK,StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,
StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
Map<String, Long> actualOutput = convertToStringMap(sts);
Assert.assertEquals(4,actualOutput.size());
Map<String, Long> expectedOutput = new HashMap<>();
expectedOutput.put("HOT|DISK:1(HOT)", 1l);
expectedOutput.put("HOT|DISK:2(HOT)", 1l);
expectedOutput.put("HOT|DISK:3(HOT)", 1l);
expectedOutput.put("HOT|DISK:4(HOT)", 1l);
Assert.assertEquals(expectedOutput,actualOutput);
}
@Test
public void testMultipleHotsWithDifferentCounts() {
BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
BlockStoragePolicy hot = bsps.getPolicy("HOT");
sts.add(new StorageType[]{StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,
StorageType.DISK,StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,
StorageType.DISK,StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,
StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
Map<String, Long> actualOutput = convertToStringMap(sts);
Assert.assertEquals(4,actualOutput.size());
Map<String, Long> expectedOutput = new HashMap<>();
expectedOutput.put("HOT|DISK:1(HOT)", 1l);
expectedOutput.put("HOT|DISK:2(HOT)", 2l);
expectedOutput.put("HOT|DISK:3(HOT)", 2l);
expectedOutput.put("HOT|DISK:4(HOT)", 1l);
Assert.assertEquals(expectedOutput,actualOutput);
}
@Test
public void testMultipleWarmsInDifferentOrder() {
BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
BlockStoragePolicy warm = bsps.getPolicy("WARM");
//DISK:1,ARCHIVE:1
sts.add(new StorageType[]{StorageType.DISK,StorageType.ARCHIVE},warm);
sts.add(new StorageType[]{StorageType.ARCHIVE,StorageType.DISK},warm);
//DISK:2,ARCHIVE:1
sts.add(new StorageType[]{StorageType.ARCHIVE,
StorageType.DISK,StorageType.DISK},warm);
sts.add(new StorageType[]{StorageType.DISK,
StorageType.ARCHIVE,StorageType.DISK},warm);
sts.add(new StorageType[]{StorageType.DISK,
StorageType.DISK,StorageType.ARCHIVE},warm);
//DISK:1,ARCHIVE:2
sts.add(new StorageType[]{StorageType.DISK,
StorageType.ARCHIVE,StorageType.ARCHIVE},warm);
sts.add(new StorageType[]{StorageType.ARCHIVE,
StorageType.DISK,StorageType.ARCHIVE},warm);
sts.add(new StorageType[]{StorageType.ARCHIVE,
StorageType.ARCHIVE,StorageType.DISK},warm);
//DISK:2,ARCHIVE:2
sts.add(new StorageType[]{StorageType.ARCHIVE,
StorageType.ARCHIVE,StorageType.DISK,StorageType.DISK},warm);
Map<String, Long> actualOutput = convertToStringMap(sts);
Assert.assertEquals(4,actualOutput.size());
Map<String, Long> expectedOutput = new HashMap<>();
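    // Expected key format: "<specified policy>|<observed storage counts>";
    // combinations that match a policy's placement carry that policy's name in
    // parentheses (DISK:2,ARCHIVE:1 and DISK:2,ARCHIVE:2 match none, so no suffix)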
expectedOutput.put("WARM|DISK:1,ARCHIVE:1(WARM)", 2l);
expectedOutput.put("WARM|DISK:2,ARCHIVE:1", 3l);
expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 3l);
expectedOutput.put("WARM|DISK:2,ARCHIVE:2", 1l);
Assert.assertEquals(expectedOutput,actualOutput);
}
@Test
public void testDifferentSpecifiedPolicies() {
BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
BlockStoragePolicy hot = bsps.getPolicy("HOT");
BlockStoragePolicy warm = bsps.getPolicy("WARM");
BlockStoragePolicy cold = bsps.getPolicy("COLD");
//DISK:3
sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK,StorageType.DISK},warm);
sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK,StorageType.DISK},cold);
//DISK:1,ARCHIVE:2
sts.add(new StorageType[]{StorageType.DISK,
StorageType.ARCHIVE,StorageType.ARCHIVE},hot);
sts.add(new StorageType[]{StorageType.ARCHIVE,
StorageType.DISK,StorageType.ARCHIVE},warm);
sts.add(new StorageType[]{StorageType.ARCHIVE,
StorageType.ARCHIVE,StorageType.DISK},cold);
sts.add(new StorageType[]{StorageType.ARCHIVE,
StorageType.ARCHIVE,StorageType.DISK},cold);
//ARCHIVE:3
sts.add(new StorageType[]{StorageType.ARCHIVE,
StorageType.ARCHIVE,StorageType.ARCHIVE},hot);
sts.add(new StorageType[]{StorageType.ARCHIVE,
StorageType.ARCHIVE,StorageType.ARCHIVE},hot);
sts.add(new StorageType[]{StorageType.ARCHIVE,
StorageType.ARCHIVE,StorageType.ARCHIVE},warm);
sts.add(new StorageType[]{StorageType.ARCHIVE,
StorageType.ARCHIVE,StorageType.ARCHIVE},cold);
Map<String, Long> actualOutput = convertToStringMap(sts);
Assert.assertEquals(9,actualOutput.size());
Map<String, Long> expectedOutput = new HashMap<>();
expectedOutput.put("HOT|DISK:3(HOT)", 2l);
expectedOutput.put("COLD|DISK:1,ARCHIVE:2(WARM)", 2l);
expectedOutput.put("HOT|ARCHIVE:3(COLD)", 2l);
expectedOutput.put("WARM|DISK:3(HOT)", 1l);
expectedOutput.put("COLD|DISK:3(HOT)", 1l);
expectedOutput.put("WARM|ARCHIVE:3(COLD)", 1l);
expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 1l);
expectedOutput.put("COLD|ARCHIVE:3(COLD)", 1l);
expectedOutput.put("HOT|DISK:1,ARCHIVE:2(WARM)", 1l);
Assert.assertEquals(expectedOutput,actualOutput);
}
@Test
public void testSortInDescendingOrder() {
BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
BlockStoragePolicy hot = bsps.getPolicy("HOT");
BlockStoragePolicy warm = bsps.getPolicy("WARM");
BlockStoragePolicy cold = bsps.getPolicy("COLD");
//DISK:3
sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
//DISK:1,ARCHIVE:2
sts.add(new StorageType[]{StorageType.DISK,
StorageType.ARCHIVE,StorageType.ARCHIVE},warm);
sts.add(new StorageType[]{StorageType.ARCHIVE,
StorageType.DISK,StorageType.ARCHIVE},warm);
sts.add(new StorageType[]{StorageType.ARCHIVE,
StorageType.ARCHIVE,StorageType.DISK},warm);
//ARCHIVE:3
sts.add(new StorageType[]{StorageType.ARCHIVE,
StorageType.ARCHIVE,StorageType.ARCHIVE},cold);
sts.add(new StorageType[]{StorageType.ARCHIVE,
StorageType.ARCHIVE,StorageType.ARCHIVE},cold);
sts.add(new StorageType[]{StorageType.ARCHIVE,
StorageType.ARCHIVE,StorageType.ARCHIVE},cold);
sts.add(new StorageType[]{StorageType.ARCHIVE,
StorageType.ARCHIVE,StorageType.ARCHIVE},cold);
Map<String, Long> actualOutput = convertToStringMap(sts);
Assert.assertEquals(3,actualOutput.size());
Map<String, Long> expectedOutput = new LinkedHashMap<>();
expectedOutput.put("COLD|ARCHIVE:3(COLD)", 4l);
expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 3l);
expectedOutput.put("HOT|DISK:3(HOT)", 2l);
Assert.assertEquals(expectedOutput.toString(),actualOutput.toString());
}
}
| 9,661 | 46.831683 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.ipc.metrics.RetryCacheMetrics;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.junit.Before;
import org.junit.After;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY;
/**
 * Tests for ensuring the namenode retry cache metrics work correctly for
 * non-idempotent requests.
 *
 * The retry cache works by tracking previously received requests, keyed by the
 * ClientId and CallId of each RPC request, and storing the response. The stored
 * response is replayed when the same request is received again on retry.
*
*/
public class TestNameNodeRetryCacheMetrics {
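  // Sketch of the flow exercised below: the client is configured to drop the
  // first 2 NameNode responses, so the call is retried; the retries are answered
  // from the retry cache (2 hits) while the operation is performed once
  // (1 cache update).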
private MiniDFSCluster cluster;
private FSNamesystem namesystem;
private DistributedFileSystem filesystem;
private final int namenodeId = 0;
private Configuration conf;
private RetryCacheMetrics metrics;
private DFSClient client;
/** Start a cluster */
@Before
public void setup() throws Exception {
conf = new HdfsConfiguration();
conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
.build();
cluster.waitActive();
cluster.transitionToActive(namenodeId);
HATestUtil.setFailoverConfigurations(cluster, conf);
filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
namesystem = cluster.getNamesystem(namenodeId);
metrics = namesystem.getRetryCache().getMetricsForTests();
}
/**
* Cleanup after the test
* @throws IOException
**/
@After
public void cleanup() throws IOException {
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testRetryCacheMetrics() throws IOException {
checkMetrics(0, 0, 0);
    // DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY is 2,
    // so the first 2 responses are dropped and the request is retried.
    // After that, 1 request reaches the NameNode correctly.
trySaveNamespace();
checkMetrics(2, 0, 1);
// RetryCache will be cleared after Namesystem#close()
namesystem.close();
checkMetrics(2, 1, 1);
}
private void checkMetrics(long hit, long cleared, long updated) {
assertEquals("CacheHit", hit, metrics.getCacheHit());
assertEquals("CacheCleared", cleared, metrics.getCacheCleared());
assertEquals("CacheUpdated", updated, metrics.getCacheUpdated());
}
private void trySaveNamespace() throws IOException {
filesystem.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
filesystem.saveNamespace();
filesystem.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
}
}
| 4,084 | 34.833333 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import com.google.common.collect.Lists;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import java.util.ArrayList;
import static org.junit.Assert.assertThat;
import static org.mockito.Mockito.*;
public class TestLeaseManager {
@Rule
public Timeout timeout = new Timeout(300000);
@Test
public void testRemoveLeases() throws Exception {
FSNamesystem fsn = mock(FSNamesystem.class);
LeaseManager lm = new LeaseManager(fsn);
ArrayList<Long> ids = Lists.newArrayList(INodeId.ROOT_INODE_ID + 1,
INodeId.ROOT_INODE_ID + 2, INodeId.ROOT_INODE_ID + 3,
INodeId.ROOT_INODE_ID + 4);
for (long id : ids) {
lm.addLease("foo", id);
}
assertEquals(4, lm.getINodeIdWithLeases().size());
synchronized (lm) {
lm.removeLeases(ids);
}
assertEquals(0, lm.getINodeIdWithLeases().size());
}
/** Check that even if LeaseManager.checkLease is not able to relinquish
   * leases, the Namenode doesn't enter an infinite loop while holding the FSN
   * write lock and thus become unresponsive.
*/
@Test
public void testCheckLeaseNotInfiniteLoop() {
LeaseManager lm = new LeaseManager(makeMockFsNameSystem());
//Make sure the leases we are going to add exceed the hard limit
lm.setLeasePeriod(0, 0);
//Add some leases to the LeaseManager
lm.addLease("holder1", INodeId.ROOT_INODE_ID + 1);
lm.addLease("holder2", INodeId.ROOT_INODE_ID + 2);
lm.addLease("holder3", INodeId.ROOT_INODE_ID + 3);
assertEquals(lm.countLease(), 3);
//Initiate a call to checkLease. This should exit within the test timeout
lm.checkLeases();
}
@Test
public void testCountPath() {
LeaseManager lm = new LeaseManager(makeMockFsNameSystem());
lm.addLease("holder1", 1);
assertThat(lm.countPath(), is(1L));
lm.addLease("holder2", 2);
assertThat(lm.countPath(), is(2L));
lm.addLease("holder2", 2); // Duplicate addition
assertThat(lm.countPath(), is(2L));
assertThat(lm.countPath(), is(2L));
// Remove a couple of non-existing leases. countPath should not change.
lm.removeLease("holder2", stubInodeFile(3));
lm.removeLease("InvalidLeaseHolder", stubInodeFile(1));
assertThat(lm.countPath(), is(2L));
INodeFile file = stubInodeFile(1);
lm.reassignLease(lm.getLease(file), file, "holder2");
assertThat(lm.countPath(), is(2L)); // Count unchanged on reassign
lm.removeLease("holder2", stubInodeFile(2)); // Remove existing
assertThat(lm.countPath(), is(1L));
}
private static FSNamesystem makeMockFsNameSystem() {
FSDirectory dir = mock(FSDirectory.class);
FSNamesystem fsn = mock(FSNamesystem.class);
when(fsn.isRunning()).thenReturn(true);
when(fsn.hasWriteLock()).thenReturn(true);
when(fsn.getFSDirectory()).thenReturn(dir);
return fsn;
}
private static INodeFile stubInodeFile(long inodeId) {
PermissionStatus p = new PermissionStatus(
"dummy", "dummy", new FsPermission((short) 0777));
return new INodeFile(
inodeId, "/foo".getBytes(), p, 0L, 0L,
BlockInfo.EMPTY_ARRAY, (short) 1, 1L);
}
}
| 4,389 | 33.84127 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
import java.net.InetSocketAddress;
import java.net.URL;
import java.net.URLConnection;
import java.util.Arrays;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpConfig.Policy;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
@RunWith(value = Parameterized.class)
public class TestNameNodeHttpServer {
private static final String BASEDIR = System.getProperty("test.build.dir",
"target/test-dir") + "/" + TestNameNodeHttpServer.class.getSimpleName();
private static String keystoresDir;
private static String sslConfDir;
private static Configuration conf;
private static URLConnectionFactory connectionFactory;
@Parameters
public static Collection<Object[]> policy() {
Object[][] params = new Object[][] { { HttpConfig.Policy.HTTP_ONLY },
{ HttpConfig.Policy.HTTPS_ONLY }, { HttpConfig.Policy.HTTP_AND_HTTPS } };
return Arrays.asList(params);
}
private final HttpConfig.Policy policy;
public TestNameNodeHttpServer(Policy policy) {
super();
this.policy = policy;
}
@BeforeClass
public static void setUp() throws Exception {
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
conf = new Configuration();
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestNameNodeHttpServer.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
connectionFactory = URLConnectionFactory
.newDefaultURLConnectionFactory(conf);
}
@AfterClass
public static void tearDown() throws Exception {
FileUtil.fullyDelete(new File(BASEDIR));
KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
}
@Test
public void testHttpPolicy() throws Exception {
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 0);
NameNodeHttpServer server = null;
try {
server = new NameNodeHttpServer(conf, null, addr);
server.start();
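      // For each policy: if HTTP (resp. HTTPS) is enabled the endpoint must be
      // reachable, otherwise no address is bound for that scheme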
Assert.assertTrue(implies(policy.isHttpEnabled(),
canAccess("http", server.getHttpAddress())));
Assert.assertTrue(implies(!policy.isHttpEnabled(),
server.getHttpAddress() == null));
Assert.assertTrue(implies(policy.isHttpsEnabled(),
canAccess("https", server.getHttpsAddress())));
Assert.assertTrue(implies(!policy.isHttpsEnabled(),
server.getHttpsAddress() == null));
} finally {
if (server != null) {
server.stop();
}
}
}
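  /** Returns true if content can be fetched from the address over the given scheme. */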
private static boolean canAccess(String scheme, InetSocketAddress addr) {
if (addr == null)
return false;
try {
URL url = new URL(scheme + "://" + NetUtils.getHostPortString(addr));
URLConnection conn = connectionFactory.openConnection(url);
conn.connect();
conn.getContent();
} catch (Exception e) {
return false;
}
return true;
}
private static boolean implies(boolean a, boolean b) {
return !a || b;
}
}
| 4,490 | 33.282443 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import com.google.common.collect.ImmutableList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.permission.*;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import com.google.common.collect.Lists;
public class TestINodeAttributeProvider {
private MiniDFSCluster miniDFS;
private static final Set<String> CALLED = new HashSet<String>();
public static class MyAuthorizationProvider extends INodeAttributeProvider {
public static class MyAccessControlEnforcer implements AccessControlEnforcer {
@Override
public void checkPermission(String fsOwner, String supergroup,
UserGroupInformation ugi, INodeAttributes[] inodeAttrs,
INode[] inodes, byte[][] pathByNameArr, int snapshotId, String path,
int ancestorIndex, boolean doCheckOwner, FsAction ancestorAccess,
FsAction parentAccess, FsAction access, FsAction subAccess,
boolean ignoreEmptyDir) throws AccessControlException {
CALLED.add("checkPermission|" + ancestorAccess + "|" + parentAccess + "|" + access);
}
}
@Override
public void start() {
CALLED.add("start");
}
@Override
public void stop() {
CALLED.add("stop");
}
@Override
public INodeAttributes getAttributes(String[] pathElements,
final INodeAttributes inode) {
CALLED.add("getAttributes");
final boolean useDefault = useDefault(pathElements);
return new INodeAttributes() {
@Override
public boolean isDirectory() {
return inode.isDirectory();
}
@Override
public byte[] getLocalNameBytes() {
return inode.getLocalNameBytes();
}
@Override
public String getUserName() {
return (useDefault) ? inode.getUserName() : "foo";
}
@Override
public String getGroupName() {
return (useDefault) ? inode.getGroupName() : "bar";
}
@Override
public FsPermission getFsPermission() {
return (useDefault) ? inode.getFsPermission()
: new FsPermission(getFsPermissionShort());
}
@Override
public short getFsPermissionShort() {
return (useDefault) ? inode.getFsPermissionShort()
: (short) getPermissionLong();
}
@Override
public long getPermissionLong() {
return (useDefault) ? inode.getPermissionLong() : 0770;
}
@Override
public AclFeature getAclFeature() {
AclFeature f;
if (useDefault) {
f = inode.getAclFeature();
} else {
AclEntry acl = new AclEntry.Builder().setType(AclEntryType.GROUP).
setPermission(FsAction.ALL).setName("xxx").build();
f = new AclFeature(AclEntryStatusFormat.toInt(
Lists.newArrayList(acl)));
}
return f;
}
@Override
public XAttrFeature getXAttrFeature() {
XAttrFeature x;
if (useDefault) {
x = inode.getXAttrFeature();
} else {
x = new XAttrFeature(ImmutableList.copyOf(
Lists.newArrayList(
new XAttr.Builder().setName("test")
.setValue(new byte[] {1, 2})
.build())));
}
return x;
}
@Override
public long getModificationTime() {
return (useDefault) ? inode.getModificationTime() : 0;
}
@Override
public long getAccessTime() {
return (useDefault) ? inode.getAccessTime() : 0;
}
};
}
@Override
public AccessControlEnforcer getExternalAccessControlEnforcer(
        AccessControlEnforcer defaultEnforcer) {
return new MyAccessControlEnforcer();
}
private boolean useDefault(String[] pathElements) {
return (pathElements.length < 2) ||
!(pathElements[0].equals("user") && pathElements[1].equals("authz"));
}
}
@Before
public void setUp() throws IOException {
CALLED.clear();
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY,
MyAuthorizationProvider.class.getName());
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
miniDFS = new MiniDFSCluster.Builder(conf).build();
}
@After
public void cleanUp() throws IOException {
CALLED.clear();
if (miniDFS != null) {
miniDFS.shutdown();
}
Assert.assertTrue(CALLED.contains("stop"));
}
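  /**
   * Verify that permission checks and attribute lookups for mkdirs,
   * listStatus and getAclStatus are delegated to the configured
   * INodeAttributeProvider and its AccessControlEnforcer.
   */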
@Test
public void testDelegationToProvider() throws Exception {
Assert.assertTrue(CALLED.contains("start"));
FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
fs.mkdirs(new Path("/tmp"));
fs.setPermission(new Path("/tmp"), new FsPermission((short) 0777));
UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1",
new String[]{"g1"});
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
CALLED.clear();
fs.mkdirs(new Path("/tmp/foo"));
Assert.assertTrue(CALLED.contains("getAttributes"));
Assert.assertTrue(CALLED.contains("checkPermission|null|null|null"));
Assert.assertTrue(CALLED.contains("checkPermission|WRITE|null|null"));
CALLED.clear();
fs.listStatus(new Path("/tmp/foo"));
Assert.assertTrue(CALLED.contains("getAttributes"));
Assert.assertTrue(
CALLED.contains("checkPermission|null|null|READ_EXECUTE"));
CALLED.clear();
fs.getAclStatus(new Path("/tmp/foo"));
Assert.assertTrue(CALLED.contains("getAttributes"));
Assert.assertTrue(CALLED.contains("checkPermission|null|null|null"));
return null;
}
});
}
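  /**
   * Verify that owner, group, permission, ACL and XAttr values supplied by
   * the custom provider are returned for /user/authz, while other paths keep
   * their default attributes.
   */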
@Test
public void testCustomProvider() throws Exception {
FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
fs.mkdirs(new Path("/user/xxx"));
FileStatus status = fs.getFileStatus(new Path("/user/xxx"));
Assert.assertEquals(System.getProperty("user.name"), status.getOwner());
Assert.assertEquals("supergroup", status.getGroup());
Assert.assertEquals(new FsPermission((short) 0755), status.getPermission());
fs.mkdirs(new Path("/user/authz"));
Path p = new Path("/user/authz");
status = fs.getFileStatus(p);
Assert.assertEquals("foo", status.getOwner());
Assert.assertEquals("bar", status.getGroup());
Assert.assertEquals(new FsPermission((short) 0770), status.getPermission());
AclStatus aclStatus = fs.getAclStatus(p);
Assert.assertEquals(1, aclStatus.getEntries().size());
Assert.assertEquals(AclEntryType.GROUP, aclStatus.getEntries().get(0)
.getType());
Assert.assertEquals("xxx", aclStatus.getEntries().get(0)
.getName());
Assert.assertEquals(FsAction.ALL, aclStatus.getEntries().get(0)
.getPermission());
Map<String, byte[]> xAttrs = fs.getXAttrs(p);
Assert.assertTrue(xAttrs.containsKey("user.test"));
Assert.assertEquals(2, xAttrs.get("user.test").length);
}
}
| 8,927 | 34.428571 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS;
import static org.apache.hadoop.hdfs.protocol.CachePoolInfo.RELATIVE_EXPIRY_NEVER;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import org.apache.commons.lang.time.DateUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.BlockReaderTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolStats;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.GSet;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.htrace.Sampler;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Supplier;
public class TestCacheDirectives {
static final Log LOG = LogFactory.getLog(TestCacheDirectives.class);
private static final UserGroupInformation unprivilegedUser =
UserGroupInformation.createRemoteUser("unprivilegedUser");
static private Configuration conf;
static private MiniDFSCluster cluster;
static private DistributedFileSystem dfs;
static private NamenodeProtocols proto;
static private NameNode namenode;
static private CacheManipulator prevCacheManipulator;
static {
NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
}
private static final long BLOCK_SIZE = 4096;
private static final int NUM_DATANODES = 4;
// Most Linux installs will allow non-root users to lock 64KB.
// In this test though, we stub out mlock so this doesn't matter.
private static final long CACHE_CAPACITY = 64 * 1024 / NUM_DATANODES;
private static HdfsConfiguration createCachingConf() {
HdfsConfiguration conf = new HdfsConfiguration();
conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, CACHE_CAPACITY);
conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 1000);
conf.setLong(DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 1000);
// set low limits here for testing purposes
conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, 2);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
2);
return conf;
}
@Before
public void setup() throws Exception {
conf = createCachingConf();
cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
cluster.waitActive();
dfs = cluster.getFileSystem();
proto = cluster.getNameNodeRpc();
namenode = cluster.getNameNode();
prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
BlockReaderTestUtil.enableHdfsCachingTracing();
}
@After
public void teardown() throws Exception {
// Remove cache directives left behind by tests so that we release mmaps.
RemoteIterator<CacheDirectiveEntry> iter = dfs.listCacheDirectives(null);
while (iter.hasNext()) {
dfs.removeCacheDirective(iter.next().getInfo().getId());
}
waitForCachedBlocks(namenode, 0, 0, "teardown");
if (cluster != null) {
cluster.shutdown();
}
// Restore the original CacheManipulator
NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
}
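  /**
   * Exercise add, modify and remove of cache pools, including the error
   * cases for duplicate, empty, null and non-existent pool names.
   */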
@Test(timeout=60000)
public void testBasicPoolOperations() throws Exception {
final String poolName = "pool1";
CachePoolInfo info = new CachePoolInfo(poolName).
setOwnerName("bob").setGroupName("bobgroup").
setMode(new FsPermission((short)0755)).setLimit(150l);
// Add a pool
dfs.addCachePool(info);
// Do some bad addCachePools
try {
dfs.addCachePool(info);
fail("added the pool with the same name twice");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("pool1 already exists", ioe);
}
try {
dfs.addCachePool(new CachePoolInfo(""));
fail("added empty pool");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("invalid empty cache pool name",
ioe);
}
try {
dfs.addCachePool(null);
fail("added null pool");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("CachePoolInfo is null", ioe);
}
try {
proto.addCachePool(new CachePoolInfo(""));
fail("added empty pool");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("invalid empty cache pool name",
ioe);
}
try {
proto.addCachePool(null);
fail("added null pool");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("CachePoolInfo is null", ioe);
}
// Modify the pool
info.setOwnerName("jane").setGroupName("janegroup")
.setMode(new FsPermission((short)0700)).setLimit(314l);
dfs.modifyCachePool(info);
// Do some invalid modify pools
try {
dfs.modifyCachePool(new CachePoolInfo("fool"));
fail("modified non-existent cache pool");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("fool does not exist", ioe);
}
try {
dfs.modifyCachePool(new CachePoolInfo(""));
fail("modified empty pool");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("invalid empty cache pool name",
ioe);
}
try {
dfs.modifyCachePool(null);
fail("modified null pool");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("CachePoolInfo is null", ioe);
}
try {
proto.modifyCachePool(new CachePoolInfo(""));
fail("modified empty pool");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("invalid empty cache pool name",
ioe);
}
try {
proto.modifyCachePool(null);
fail("modified null pool");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("CachePoolInfo is null", ioe);
}
// Remove the pool
dfs.removeCachePool(poolName);
// Do some bad removePools
try {
dfs.removeCachePool("pool99");
fail("expected to get an exception when " +
"removing a non-existent pool.");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("Cannot remove " +
"non-existent cache pool", ioe);
}
try {
dfs.removeCachePool(poolName);
fail("expected to get an exception when " +
"removing a non-existent pool.");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("Cannot remove " +
"non-existent cache pool", ioe);
}
try {
dfs.removeCachePool("");
fail("removed empty pool");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("invalid empty cache pool name",
ioe);
}
try {
dfs.removeCachePool(null);
fail("removed null pool");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("invalid empty cache pool name",
ioe);
}
try {
proto.removeCachePool("");
fail("removed empty pool");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("invalid empty cache pool name",
ioe);
}
try {
proto.removeCachePool(null);
fail("removed null pool");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("invalid empty cache pool name",
ioe);
}
info = new CachePoolInfo("pool2");
dfs.addCachePool(info);
}
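  /**
   * Create a cache pool, verify its attributes via listCachePools, modify
   * and remove it, and check the errors returned for non-existent pools.
   */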
@Test(timeout=60000)
public void testCreateAndModifyPools() throws Exception {
String poolName = "pool1";
String ownerName = "abc";
String groupName = "123";
FsPermission mode = new FsPermission((short)0755);
long limit = 150;
dfs.addCachePool(new CachePoolInfo(poolName).
setOwnerName(ownerName).setGroupName(groupName).
setMode(mode).setLimit(limit));
RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
CachePoolInfo info = iter.next().getInfo();
assertEquals(poolName, info.getPoolName());
assertEquals(ownerName, info.getOwnerName());
assertEquals(groupName, info.getGroupName());
ownerName = "def";
groupName = "456";
mode = new FsPermission((short)0700);
limit = 151;
dfs.modifyCachePool(new CachePoolInfo(poolName).
setOwnerName(ownerName).setGroupName(groupName).
setMode(mode).setLimit(limit));
iter = dfs.listCachePools();
info = iter.next().getInfo();
assertEquals(poolName, info.getPoolName());
assertEquals(ownerName, info.getOwnerName());
assertEquals(groupName, info.getGroupName());
assertEquals(mode, info.getMode());
assertEquals(limit, (long)info.getLimit());
dfs.removeCachePool(poolName);
iter = dfs.listCachePools();
assertFalse("expected no cache pools after deleting pool", iter.hasNext());
proto.listCachePools(null);
try {
proto.removeCachePool("pool99");
fail("expected to get an exception when " +
"removing a non-existent pool.");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("Cannot remove non-existent",
ioe);
}
try {
proto.removeCachePool(poolName);
fail("expected to get an exception when " +
"removing a non-existent pool.");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("Cannot remove non-existent",
ioe);
}
iter = dfs.listCachePools();
assertFalse("expected no cache pools after deleting pool", iter.hasNext());
}
private static void validateListAll(
RemoteIterator<CacheDirectiveEntry> iter,
Long... ids) throws Exception {
for (Long id: ids) {
assertTrue("Unexpectedly few elements", iter.hasNext());
assertEquals("Unexpected directive ID", id,
iter.next().getInfo().getId());
}
assertFalse("Unexpectedly many list elements", iter.hasNext());
}
private static long addAsUnprivileged(
final CacheDirectiveInfo directive) throws Exception {
return unprivilegedUser
.doAs(new PrivilegedExceptionAction<Long>() {
@Override
public Long run() throws IOException {
DistributedFileSystem myDfs =
(DistributedFileSystem) FileSystem.get(conf);
return myDfs.addCacheDirective(directive);
}
});
}
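  /**
   * Exercise adding, listing, modifying and removing cache directives,
   * including the error cases for unknown pools, permission failures,
   * malformed paths, empty pool names and invalid directive IDs.
   */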
@Test(timeout=60000)
public void testAddRemoveDirectives() throws Exception {
proto.addCachePool(new CachePoolInfo("pool1").
setMode(new FsPermission((short)0777)));
proto.addCachePool(new CachePoolInfo("pool2").
setMode(new FsPermission((short)0777)));
proto.addCachePool(new CachePoolInfo("pool3").
setMode(new FsPermission((short)0777)));
proto.addCachePool(new CachePoolInfo("pool4").
setMode(new FsPermission((short)0)));
CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder().
setPath(new Path("/alpha")).
setPool("pool1").
build();
CacheDirectiveInfo beta = new CacheDirectiveInfo.Builder().
setPath(new Path("/beta")).
setPool("pool2").
build();
CacheDirectiveInfo delta = new CacheDirectiveInfo.Builder().
setPath(new Path("/delta")).
setPool("pool1").
build();
long alphaId = addAsUnprivileged(alpha);
long alphaId2 = addAsUnprivileged(alpha);
assertFalse("Expected to get unique directives when re-adding an "
+ "existing CacheDirectiveInfo",
alphaId == alphaId2);
long betaId = addAsUnprivileged(beta);
try {
addAsUnprivileged(new CacheDirectiveInfo.Builder().
setPath(new Path("/unicorn")).
setPool("no_such_pool").
build());
fail("expected an error when adding to a non-existent pool.");
} catch (InvalidRequestException ioe) {
GenericTestUtils.assertExceptionContains("Unknown pool", ioe);
}
try {
addAsUnprivileged(new CacheDirectiveInfo.Builder().
setPath(new Path("/blackhole")).
setPool("pool4").
build());
fail("expected an error when adding to a pool with " +
"mode 0 (no permissions for anyone).");
} catch (AccessControlException e) {
GenericTestUtils.
assertExceptionContains("Permission denied while accessing pool", e);
}
try {
addAsUnprivileged(new CacheDirectiveInfo.Builder().
setPath(new Path("/illegal:path/")).
setPool("pool1").
build());
fail("expected an error when adding a malformed path " +
"to the cache directives.");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("is not a valid DFS filename", e);
}
try {
addAsUnprivileged(new CacheDirectiveInfo.Builder().
setPath(new Path("/emptypoolname")).
setReplication((short)1).
setPool("").
build());
fail("expected an error when adding a cache " +
"directive with an empty pool name.");
} catch (InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("Invalid empty pool name", e);
}
long deltaId = addAsUnprivileged(delta);
// We expect the following to succeed, because DistributedFileSystem
// qualifies the path.
long relativeId = addAsUnprivileged(
new CacheDirectiveInfo.Builder().
setPath(new Path("relative")).
setPool("pool1").
build());
RemoteIterator<CacheDirectiveEntry> iter;
iter = dfs.listCacheDirectives(null);
validateListAll(iter, alphaId, alphaId2, betaId, deltaId, relativeId );
iter = dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().setPool("pool3").build());
assertFalse(iter.hasNext());
iter = dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().setPool("pool1").build());
validateListAll(iter, alphaId, alphaId2, deltaId, relativeId );
iter = dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().setPool("pool2").build());
validateListAll(iter, betaId);
iter = dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().setId(alphaId2).build());
validateListAll(iter, alphaId2);
iter = dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().setId(relativeId).build());
validateListAll(iter, relativeId);
dfs.removeCacheDirective(betaId);
iter = dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().setPool("pool2").build());
assertFalse(iter.hasNext());
try {
dfs.removeCacheDirective(betaId);
fail("expected an error when removing a non-existent ID");
} catch (InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("No directive with ID", e);
}
try {
      proto.removeCacheDirective(-42L);
fail("expected an error when removing a negative ID");
} catch (InvalidRequestException e) {
GenericTestUtils.assertExceptionContains(
"Invalid negative ID", e);
}
try {
      proto.removeCacheDirective(43L);
fail("expected an error when removing a non-existent ID");
} catch (InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("No directive with ID", e);
}
dfs.removeCacheDirective(alphaId);
dfs.removeCacheDirective(alphaId2);
dfs.removeCacheDirective(deltaId);
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().
setId(relativeId).
setReplication((short)555).
build());
iter = dfs.listCacheDirectives(null);
assertTrue(iter.hasNext());
CacheDirectiveInfo modified = iter.next().getInfo();
assertEquals(relativeId, modified.getId().longValue());
assertEquals((short)555, modified.getReplication().shortValue());
dfs.removeCacheDirective(relativeId);
iter = dfs.listCacheDirectives(null);
assertFalse(iter.hasNext());
    // Verify that path-based cache directives (PBCDs) with path "." work correctly
CacheDirectiveInfo directive =
new CacheDirectiveInfo.Builder().setPath(new Path("."))
.setPool("pool1").build();
long id = dfs.addCacheDirective(directive);
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(
directive).setId(id).setReplication((short)2).build());
dfs.removeCacheDirective(id);
}
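  /**
   * Verify that cache pools and directives survive secondary NameNode
   * checkpoints and a NameNode restart.
   */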
@Test(timeout=60000)
public void testCacheManagerRestart() throws Exception {
SecondaryNameNode secondary = null;
try {
// Start a secondary namenode
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
"0.0.0.0:0");
secondary = new SecondaryNameNode(conf);
// Create and validate a pool
final String pool = "poolparty";
String groupName = "partygroup";
FsPermission mode = new FsPermission((short)0777);
long limit = 747;
dfs.addCachePool(new CachePoolInfo(pool)
.setGroupName(groupName)
.setMode(mode)
.setLimit(limit));
RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
assertTrue("No cache pools found", pit.hasNext());
CachePoolInfo info = pit.next().getInfo();
assertEquals(pool, info.getPoolName());
assertEquals(groupName, info.getGroupName());
assertEquals(mode, info.getMode());
assertEquals(limit, (long)info.getLimit());
assertFalse("Unexpected # of cache pools found", pit.hasNext());
// Create some cache entries
int numEntries = 10;
String entryPrefix = "/party-";
long prevId = -1;
final Date expiry = new Date();
for (int i=0; i<numEntries; i++) {
prevId = dfs.addCacheDirective(
new CacheDirectiveInfo.Builder().
setPath(new Path(entryPrefix + i)).setPool(pool).
setExpiration(
CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).
build());
}
RemoteIterator<CacheDirectiveEntry> dit
= dfs.listCacheDirectives(null);
for (int i=0; i<numEntries; i++) {
assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
CacheDirectiveInfo cd = dit.next().getInfo();
assertEquals(i+1, cd.getId().longValue());
assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
assertEquals(pool, cd.getPool());
}
assertFalse("Unexpected # of cache directives found", dit.hasNext());
// Checkpoint once to set some cache pools and directives on 2NN side
secondary.doCheckpoint();
// Add some more CacheManager state
final String imagePool = "imagePool";
dfs.addCachePool(new CachePoolInfo(imagePool));
prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
.setPath(new Path("/image")).setPool(imagePool).build());
// Save a new image to force a fresh fsimage download
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
dfs.saveNamespace();
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
// Checkpoint again forcing a reload of FSN state
boolean fetchImage = secondary.doCheckpoint();
assertTrue("Secondary should have fetched a new fsimage from NameNode",
fetchImage);
// Remove temp pool and directive
dfs.removeCachePool(imagePool);
// Restart namenode
cluster.restartNameNode();
// Check that state came back up
pit = dfs.listCachePools();
assertTrue("No cache pools found", pit.hasNext());
info = pit.next().getInfo();
      assertEquals(pool, info.getPoolName());
assertEquals(groupName, info.getGroupName());
assertEquals(mode, info.getMode());
assertEquals(limit, (long)info.getLimit());
assertFalse("Unexpected # of cache pools found", pit.hasNext());
dit = dfs.listCacheDirectives(null);
for (int i=0; i<numEntries; i++) {
assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
CacheDirectiveInfo cd = dit.next().getInfo();
assertEquals(i+1, cd.getId().longValue());
assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
assertEquals(pool, cd.getPool());
assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
}
assertFalse("Unexpected # of cache directives found", dit.hasNext());
long nextId = dfs.addCacheDirective(
new CacheDirectiveInfo.Builder().
setPath(new Path("/foobar")).setPool(pool).build());
assertEquals(prevId + 1, nextId);
} finally {
if (secondary != null) {
secondary.shutdown();
}
}
}
/**
* Wait for the NameNode to have an expected number of cached blocks
* and replicas.
* @param nn NameNode
* @param expectedCachedBlocks if -1, treat as wildcard
* @param expectedCachedReplicas if -1, treat as wildcard
* @throws Exception
*/
private static void waitForCachedBlocks(NameNode nn,
final int expectedCachedBlocks, final int expectedCachedReplicas,
final String logString) throws Exception {
final FSNamesystem namesystem = nn.getNamesystem();
final CacheManager cacheManager = namesystem.getCacheManager();
LOG.info("Waiting for " + expectedCachedBlocks + " blocks with " +
expectedCachedReplicas + " replicas.");
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
int numCachedBlocks = 0, numCachedReplicas = 0;
namesystem.readLock();
try {
GSet<CachedBlock, CachedBlock> cachedBlocks =
cacheManager.getCachedBlocks();
if (cachedBlocks != null) {
for (Iterator<CachedBlock> iter = cachedBlocks.iterator();
iter.hasNext(); ) {
CachedBlock cachedBlock = iter.next();
numCachedBlocks++;
numCachedReplicas += cachedBlock.getDatanodes(Type.CACHED).size();
}
}
} finally {
namesystem.readUnlock();
}
LOG.info(logString + " cached blocks: have " + numCachedBlocks +
" / " + expectedCachedBlocks + ". " +
"cached replicas: have " + numCachedReplicas +
" / " + expectedCachedReplicas);
if (expectedCachedBlocks == -1 ||
numCachedBlocks == expectedCachedBlocks) {
if (expectedCachedReplicas == -1 ||
numCachedReplicas == expectedCachedReplicas) {
return true;
}
}
return false;
}
}, 500, 60000);
}
private static void waitForCacheDirectiveStats(final DistributedFileSystem dfs,
final long targetBytesNeeded, final long targetBytesCached,
final long targetFilesNeeded, final long targetFilesCached,
final CacheDirectiveInfo filter, final String infoString)
throws Exception {
LOG.info("Polling listCacheDirectives " +
((filter == null) ? "ALL" : filter.toString()) + " for " +
targetBytesNeeded + " targetBytesNeeded, " +
targetBytesCached + " targetBytesCached, " +
targetFilesNeeded + " targetFilesNeeded, " +
targetFilesCached + " targetFilesCached");
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
RemoteIterator<CacheDirectiveEntry> iter = null;
CacheDirectiveEntry entry = null;
try {
iter = dfs.listCacheDirectives(filter);
entry = iter.next();
} catch (IOException e) {
fail("got IOException while calling " +
"listCacheDirectives: " + e.getMessage());
}
Assert.assertNotNull(entry);
CacheDirectiveStats stats = entry.getStats();
if ((targetBytesNeeded == stats.getBytesNeeded()) &&
(targetBytesCached == stats.getBytesCached()) &&
(targetFilesNeeded == stats.getFilesNeeded()) &&
(targetFilesCached == stats.getFilesCached())) {
return true;
} else {
LOG.info(infoString + ": " +
"filesNeeded: " +
stats.getFilesNeeded() + "/" + targetFilesNeeded +
", filesCached: " +
stats.getFilesCached() + "/" + targetFilesCached +
", bytesNeeded: " +
stats.getBytesNeeded() + "/" + targetBytesNeeded +
", bytesCached: " +
stats.getBytesCached() + "/" + targetBytesCached);
return false;
}
}
}, 500, 60000);
}
private static void waitForCachePoolStats(final DistributedFileSystem dfs,
final long targetBytesNeeded, final long targetBytesCached,
final long targetFilesNeeded, final long targetFilesCached,
final CachePoolInfo pool, final String infoString)
throws Exception {
LOG.info("Polling listCachePools " + pool.toString() + " for " +
targetBytesNeeded + " targetBytesNeeded, " +
targetBytesCached + " targetBytesCached, " +
targetFilesNeeded + " targetFilesNeeded, " +
targetFilesCached + " targetFilesCached");
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
RemoteIterator<CachePoolEntry> iter = null;
try {
iter = dfs.listCachePools();
} catch (IOException e) {
fail("got IOException while calling " +
"listCachePools: " + e.getMessage());
}
while (true) {
CachePoolEntry entry = null;
try {
if (!iter.hasNext()) {
break;
}
entry = iter.next();
} catch (IOException e) {
fail("got IOException while iterating through " +
"listCachePools: " + e.getMessage());
}
if (entry == null) {
break;
}
if (!entry.getInfo().getPoolName().equals(pool.getPoolName())) {
continue;
}
CachePoolStats stats = entry.getStats();
if ((targetBytesNeeded == stats.getBytesNeeded()) &&
(targetBytesCached == stats.getBytesCached()) &&
(targetFilesNeeded == stats.getFilesNeeded()) &&
(targetFilesCached == stats.getFilesCached())) {
return true;
} else {
LOG.info(infoString + ": " +
"filesNeeded: " +
stats.getFilesNeeded() + "/" + targetFilesNeeded +
", filesCached: " +
stats.getFilesCached() + "/" + targetFilesCached +
", bytesNeeded: " +
stats.getBytesNeeded() + "/" + targetBytesNeeded +
", bytesCached: " +
stats.getBytesCached() + "/" + targetBytesCached);
return false;
}
}
return false;
}
}, 500, 60000);
}
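  /**
   * Check the number of cached blocks and cached replicas reported by
   * getFileBlockLocations for the given paths.
   */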
private static void checkNumCachedReplicas(final DistributedFileSystem dfs,
final List<Path> paths, final int expectedBlocks,
final int expectedReplicas)
throws Exception {
int numCachedBlocks = 0;
int numCachedReplicas = 0;
for (Path p: paths) {
final FileStatus f = dfs.getFileStatus(p);
final long len = f.getLen();
final long blockSize = f.getBlockSize();
// round it up to full blocks
final long numBlocks = (len + blockSize - 1) / blockSize;
BlockLocation[] locs = dfs.getFileBlockLocations(p, 0, len);
assertEquals("Unexpected number of block locations for path " + p,
numBlocks, locs.length);
for (BlockLocation l: locs) {
if (l.getCachedHosts().length > 0) {
numCachedBlocks++;
}
numCachedReplicas += l.getCachedHosts().length;
}
}
LOG.info("Found " + numCachedBlocks + " of " + expectedBlocks + " blocks");
LOG.info("Found " + numCachedReplicas + " of " + expectedReplicas
+ " replicas");
assertEquals("Unexpected number of cached blocks", expectedBlocks,
numCachedBlocks);
assertEquals("Unexpected number of cached replicas", expectedReplicas,
numCachedReplicas);
}
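  /**
   * Cache and then uncache a set of files one directive at a time, checking
   * the NameNode's cached block counts and the per-datanode cache usage.
   */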
@Test(timeout=120000)
public void testWaitForCachedReplicas() throws Exception {
FileSystemTestHelper helper = new FileSystemTestHelper();
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return ((namenode.getNamesystem().getCacheCapacity() ==
(NUM_DATANODES * CACHE_CAPACITY)) &&
(namenode.getNamesystem().getCacheUsed() == 0));
}
}, 500, 60000);
// Send a cache report referring to a bogus block. It is important that
// the NameNode be robust against this.
NamenodeProtocols nnRpc = namenode.getRpcServer();
DataNode dn0 = cluster.getDataNodes().get(0);
String bpid = cluster.getNamesystem().getBlockPoolId();
LinkedList<Long> bogusBlockIds = new LinkedList<Long> ();
bogusBlockIds.add(999999L);
nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds);
Path rootDir = helper.getDefaultWorkingDirectory(dfs);
// Create the pool
final String pool = "friendlyPool";
nnRpc.addCachePool(new CachePoolInfo("friendlyPool"));
// Create some test files
final int numFiles = 2;
final int numBlocksPerFile = 2;
final List<String> paths = new ArrayList<String>(numFiles);
for (int i=0; i<numFiles; i++) {
Path p = new Path(rootDir, "testCachePaths-" + i);
FileSystemTestHelper.createFile(dfs, p, numBlocksPerFile,
(int)BLOCK_SIZE);
paths.add(p.toUri().getPath());
}
// Check the initial statistics at the namenode
waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
// Cache and check each path in sequence
int expected = 0;
for (int i=0; i<numFiles; i++) {
CacheDirectiveInfo directive =
new CacheDirectiveInfo.Builder().
setPath(new Path(paths.get(i))).
setPool(pool).
build();
nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
expected += numBlocksPerFile;
waitForCachedBlocks(namenode, expected, expected,
"testWaitForCachedReplicas:1");
}
// Check that the datanodes have the right cache values
DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
assertEquals("Unexpected number of live nodes", NUM_DATANODES, live.length);
long totalUsed = 0;
for (DatanodeInfo dn : live) {
final long cacheCapacity = dn.getCacheCapacity();
final long cacheUsed = dn.getCacheUsed();
final long cacheRemaining = dn.getCacheRemaining();
assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
assertEquals("Capacity not equal to used + remaining",
cacheCapacity, cacheUsed + cacheRemaining);
assertEquals("Remaining not equal to capacity - used",
cacheCapacity - cacheUsed, cacheRemaining);
totalUsed += cacheUsed;
}
assertEquals(expected*BLOCK_SIZE, totalUsed);
// Uncache and check each path in sequence
RemoteIterator<CacheDirectiveEntry> entries =
new CacheDirectiveIterator(nnRpc, null, Sampler.NEVER);
for (int i=0; i<numFiles; i++) {
CacheDirectiveEntry entry = entries.next();
nnRpc.removeCacheDirective(entry.getInfo().getId());
expected -= numBlocksPerFile;
waitForCachedBlocks(namenode, expected, expected,
"testWaitForCachedReplicas:2");
}
}
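  /**
   * Cache an entire directory, then add a higher-replication directive for a
   * nested file, and verify the per-directive and per-pool statistics.
   */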
@Test(timeout=120000)
public void testWaitForCachedReplicasInDirectory() throws Exception {
// Create the pool
final String pool = "friendlyPool";
final CachePoolInfo poolInfo = new CachePoolInfo(pool);
dfs.addCachePool(poolInfo);
// Create some test files
final List<Path> paths = new LinkedList<Path>();
paths.add(new Path("/foo/bar"));
paths.add(new Path("/foo/baz"));
paths.add(new Path("/foo2/bar2"));
paths.add(new Path("/foo2/baz2"));
dfs.mkdir(new Path("/foo"), FsPermission.getDirDefault());
dfs.mkdir(new Path("/foo2"), FsPermission.getDirDefault());
final int numBlocksPerFile = 2;
for (Path path : paths) {
FileSystemTestHelper.createFile(dfs, path, numBlocksPerFile,
(int)BLOCK_SIZE, (short)3, false);
}
waitForCachedBlocks(namenode, 0, 0,
"testWaitForCachedReplicasInDirectory:0");
// cache entire directory
long id = dfs.addCacheDirective(
new CacheDirectiveInfo.Builder().
setPath(new Path("/foo")).
setReplication((short)2).
setPool(pool).
build());
waitForCachedBlocks(namenode, 4, 8,
"testWaitForCachedReplicasInDirectory:1:blocks");
// Verify that listDirectives gives the stats we want.
waitForCacheDirectiveStats(dfs,
4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE,
2, 2,
new CacheDirectiveInfo.Builder().
setPath(new Path("/foo")).
build(),
"testWaitForCachedReplicasInDirectory:1:directive");
waitForCachePoolStats(dfs,
4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE,
2, 2,
poolInfo, "testWaitForCachedReplicasInDirectory:1:pool");
long id2 = dfs.addCacheDirective(
new CacheDirectiveInfo.Builder().
setPath(new Path("/foo/bar")).
setReplication((short)4).
setPool(pool).
build());
// wait for an additional 2 cached replicas to come up
waitForCachedBlocks(namenode, 4, 10,
"testWaitForCachedReplicasInDirectory:2:blocks");
// the directory directive's stats are unchanged
waitForCacheDirectiveStats(dfs,
4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE,
2, 2,
new CacheDirectiveInfo.Builder().
setPath(new Path("/foo")).
build(),
"testWaitForCachedReplicasInDirectory:2:directive-1");
// verify /foo/bar's stats
waitForCacheDirectiveStats(dfs,
4 * numBlocksPerFile * BLOCK_SIZE,
// only 3 because the file only has 3 replicas, not 4 as requested.
3 * numBlocksPerFile * BLOCK_SIZE,
1,
// only 0 because the file can't be fully cached
0,
new CacheDirectiveInfo.Builder().
setPath(new Path("/foo/bar")).
build(),
"testWaitForCachedReplicasInDirectory:2:directive-2");
waitForCachePoolStats(dfs,
(4+4) * numBlocksPerFile * BLOCK_SIZE,
(4+3) * numBlocksPerFile * BLOCK_SIZE,
3, 2,
poolInfo, "testWaitForCachedReplicasInDirectory:2:pool");
// remove and watch numCached go to 0
dfs.removeCacheDirective(id);
dfs.removeCacheDirective(id2);
waitForCachedBlocks(namenode, 0, 0,
"testWaitForCachedReplicasInDirectory:3:blocks");
waitForCachePoolStats(dfs,
0, 0,
0, 0,
poolInfo, "testWaitForCachedReplicasInDirectory:3:pool");
}
/**
* Tests stepping the cache replication factor up and down, checking the
* number of cached replicas and blocks as well as the advertised locations.
* @throws Exception
*/
@Test(timeout=120000)
public void testReplicationFactor() throws Exception {
// Create the pool
final String pool = "friendlyPool";
dfs.addCachePool(new CachePoolInfo(pool));
// Create some test files
final List<Path> paths = new LinkedList<Path>();
paths.add(new Path("/foo/bar"));
paths.add(new Path("/foo/baz"));
paths.add(new Path("/foo2/bar2"));
paths.add(new Path("/foo2/baz2"));
dfs.mkdir(new Path("/foo"), FsPermission.getDirDefault());
dfs.mkdir(new Path("/foo2"), FsPermission.getDirDefault());
final int numBlocksPerFile = 2;
for (Path path : paths) {
FileSystemTestHelper.createFile(dfs, path, numBlocksPerFile,
(int)BLOCK_SIZE, (short)3, false);
}
waitForCachedBlocks(namenode, 0, 0, "testReplicationFactor:0");
checkNumCachedReplicas(dfs, paths, 0, 0);
// cache directory
long id = dfs.addCacheDirective(
new CacheDirectiveInfo.Builder().
setPath(new Path("/foo")).
setReplication((short)1).
setPool(pool).
build());
waitForCachedBlocks(namenode, 4, 4, "testReplicationFactor:1");
checkNumCachedReplicas(dfs, paths, 4, 4);
// step up the replication factor
for (int i=2; i<=3; i++) {
dfs.modifyCacheDirective(
new CacheDirectiveInfo.Builder().
setId(id).
setReplication((short)i).
build());
waitForCachedBlocks(namenode, 4, 4*i, "testReplicationFactor:2");
checkNumCachedReplicas(dfs, paths, 4, 4*i);
}
// step it down
for (int i=2; i>=1; i--) {
dfs.modifyCacheDirective(
new CacheDirectiveInfo.Builder().
setId(id).
setReplication((short)i).
build());
waitForCachedBlocks(namenode, 4, 4*i, "testReplicationFactor:3");
checkNumCachedReplicas(dfs, paths, 4, 4*i);
}
// remove and watch numCached go to 0
dfs.removeCacheDirective(id);
waitForCachedBlocks(namenode, 0, 0, "testReplicationFactor:4");
checkNumCachedReplicas(dfs, paths, 0, 0);
}
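  /**
   * Verify that a user without access to a pool sees only the pool name in
   * listCachePools, while the pool owner sees the full metadata.
   */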
@Test(timeout=60000)
public void testListCachePoolPermissions() throws Exception {
final UserGroupInformation myUser = UserGroupInformation
.createRemoteUser("myuser");
final DistributedFileSystem myDfs =
(DistributedFileSystem)DFSTestUtil.getFileSystemAs(myUser, conf);
final String poolName = "poolparty";
dfs.addCachePool(new CachePoolInfo(poolName)
.setMode(new FsPermission((short)0700)));
// Should only see partial info
RemoteIterator<CachePoolEntry> it = myDfs.listCachePools();
CachePoolInfo info = it.next().getInfo();
assertFalse(it.hasNext());
assertEquals("Expected pool name", poolName, info.getPoolName());
assertNull("Unexpected owner name", info.getOwnerName());
assertNull("Unexpected group name", info.getGroupName());
assertNull("Unexpected mode", info.getMode());
assertNull("Unexpected limit", info.getLimit());
// Modify the pool so myuser is now the owner
final long limit = 99;
dfs.modifyCachePool(new CachePoolInfo(poolName)
.setOwnerName(myUser.getShortUserName())
.setLimit(limit));
// Should see full info
it = myDfs.listCachePools();
info = it.next().getInfo();
assertFalse(it.hasNext());
assertEquals("Expected pool name", poolName, info.getPoolName());
assertEquals("Mismatched owner name", myUser.getShortUserName(),
info.getOwnerName());
assertNotNull("Expected group name", info.getGroupName());
assertEquals("Mismatched mode", (short) 0700,
info.getMode().toShort());
assertEquals("Mismatched limit", limit, (long)info.getLimit());
}
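  /**
   * Verify directive expirations: blocks are uncached once a directive
   * expires, re-cached when the expiration is extended, and negative TTLs
   * are rejected.
   */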
@Test(timeout=120000)
public void testExpiry() throws Exception {
String pool = "pool1";
dfs.addCachePool(new CachePoolInfo(pool));
Path p = new Path("/mypath");
DFSTestUtil.createFile(dfs, p, BLOCK_SIZE*2, (short)2, 0x999);
    // Set the expiration beyond the test timeout so it does not expire mid-test
Date start = new Date();
Date expiry = DateUtils.addSeconds(start, 120);
final long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
.setPath(p)
.setPool(pool)
.setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry))
.setReplication((short)2)
.build());
waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:1");
// Change it to expire sooner
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
.setExpiration(Expiration.newRelative(0)).build());
waitForCachedBlocks(cluster.getNameNode(), 0, 0, "testExpiry:2");
RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(null);
CacheDirectiveEntry ent = it.next();
assertFalse(it.hasNext());
Date entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
assertTrue("Directive should have expired",
entryExpiry.before(new Date()));
// Change it back to expire later
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
.setExpiration(Expiration.newRelative(120000)).build());
waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:3");
it = dfs.listCacheDirectives(null);
ent = it.next();
assertFalse(it.hasNext());
entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
assertTrue("Directive should not have expired",
entryExpiry.after(new Date()));
// Verify that setting a negative TTL throws an error
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
.setExpiration(Expiration.newRelative(-1)).build());
} catch (InvalidRequestException e) {
GenericTestUtils
.assertExceptionContains("Cannot set a negative expiration", e);
}
}
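  /**
   * Verify cache pool limit enforcement: directives exceeding the remaining
   * capacity are rejected unless forced, and lowering a pool's limit
   * uncaches data and reports overlimit bytes.
   */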
@Test(timeout=120000)
public void testLimit() throws Exception {
try {
dfs.addCachePool(new CachePoolInfo("poolofnegativity").setLimit(-99l));
fail("Should not be able to set a negative limit");
} catch (InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("negative", e);
}
final String destiny = "poolofdestiny";
final Path path1 = new Path("/destiny");
DFSTestUtil.createFile(dfs, path1, 2*BLOCK_SIZE, (short)1, 0x9494);
// Start off with a limit that is too small
final CachePoolInfo poolInfo = new CachePoolInfo(destiny)
.setLimit(2*BLOCK_SIZE-1);
dfs.addCachePool(poolInfo);
final CacheDirectiveInfo info1 = new CacheDirectiveInfo.Builder()
.setPool(destiny).setPath(path1).build();
try {
dfs.addCacheDirective(info1);
fail("Should not be able to cache when there is no more limit");
} catch (InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("remaining capacity", e);
}
// Raise the limit up to fit and it should work this time
poolInfo.setLimit(2*BLOCK_SIZE);
dfs.modifyCachePool(poolInfo);
long id1 = dfs.addCacheDirective(info1);
waitForCachePoolStats(dfs,
2*BLOCK_SIZE, 2*BLOCK_SIZE,
1, 1,
poolInfo, "testLimit:1");
// Adding another file, it shouldn't be cached
final Path path2 = new Path("/failure");
DFSTestUtil.createFile(dfs, path2, BLOCK_SIZE, (short)1, 0x9495);
try {
dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
.setPool(destiny).setPath(path2).build(),
EnumSet.noneOf(CacheFlag.class));
fail("Should not be able to add another cached file");
} catch (InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("remaining capacity", e);
}
// Bring the limit down, the first file should get uncached
poolInfo.setLimit(BLOCK_SIZE);
dfs.modifyCachePool(poolInfo);
waitForCachePoolStats(dfs,
2*BLOCK_SIZE, 0,
1, 0,
poolInfo, "testLimit:2");
RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
assertTrue("Expected a cache pool", it.hasNext());
CachePoolStats stats = it.next().getStats();
assertEquals("Overlimit bytes should be difference of needed and limit",
BLOCK_SIZE, stats.getBytesOverlimit());
// Moving a directive to a pool without enough limit should fail
CachePoolInfo inadequate =
new CachePoolInfo("poolofinadequacy").setLimit(BLOCK_SIZE);
dfs.addCachePool(inadequate);
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1)
.setId(id1).setPool(inadequate.getPoolName()).build(),
EnumSet.noneOf(CacheFlag.class));
} catch(InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("remaining capacity", e);
}
// Succeeds when force=true
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1).setId(id1)
.setPool(inadequate.getPoolName()).build(),
EnumSet.of(CacheFlag.FORCE));
// Also can add with force=true
dfs.addCacheDirective(
new CacheDirectiveInfo.Builder().setPool(inadequate.getPoolName())
.setPath(path1).build(), EnumSet.of(CacheFlag.FORCE));
}
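  /**
   * Verify validation and enforcement of a pool's maximum relative expiry on
   * add and modify of directives, including moving a directive to a pool
   * with a smaller maximum.
   */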
@Test(timeout=30000)
public void testMaxRelativeExpiry() throws Exception {
// Test that negative and really big max expirations can't be set during add
try {
dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(-1l));
fail("Added a pool with a negative max expiry.");
} catch (InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("negative", e);
}
try {
dfs.addCachePool(new CachePoolInfo("failpool")
.setMaxRelativeExpiryMs(Long.MAX_VALUE - 1));
fail("Added a pool with too big of a max expiry.");
} catch (InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("too big", e);
}
// Test that setting a max relative expiry on a pool works
CachePoolInfo coolPool = new CachePoolInfo("coolPool");
final long poolExpiration = 1000 * 60 * 10l;
dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(poolExpiration));
RemoteIterator<CachePoolEntry> poolIt = dfs.listCachePools();
CachePoolInfo listPool = poolIt.next().getInfo();
assertFalse("Should only be one pool", poolIt.hasNext());
assertEquals("Expected max relative expiry to match set value",
poolExpiration, listPool.getMaxRelativeExpiryMs().longValue());
// Test that negative and really big max expirations can't be modified
try {
dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(-1l));
fail("Added a pool with a negative max expiry.");
} catch (InvalidRequestException e) {
assertExceptionContains("negative", e);
}
try {
dfs.modifyCachePool(coolPool
.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER+1));
fail("Added a pool with too big of a max expiry.");
} catch (InvalidRequestException e) {
assertExceptionContains("too big", e);
}
// Test that adding a directives without an expiration uses the pool's max
CacheDirectiveInfo defaultExpiry = new CacheDirectiveInfo.Builder()
.setPath(new Path("/blah"))
.setPool(coolPool.getPoolName())
.build();
dfs.addCacheDirective(defaultExpiry);
RemoteIterator<CacheDirectiveEntry> dirIt =
dfs.listCacheDirectives(defaultExpiry);
CacheDirectiveInfo listInfo = dirIt.next().getInfo();
assertFalse("Should only have one entry in listing", dirIt.hasNext());
long listExpiration = listInfo.getExpiration().getAbsoluteMillis()
- new Date().getTime();
assertTrue("Directive expiry should be approximately the pool's max expiry",
Math.abs(listExpiration - poolExpiration) < 10*1000);
// Test that the max is enforced on add for relative and absolute
CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder()
.setPath(new Path("/lolcat"))
.setPool(coolPool.getPoolName());
try {
dfs.addCacheDirective(builder
.setExpiration(Expiration.newRelative(poolExpiration+1))
.build());
fail("Added a directive that exceeds pool's max relative expiration");
} catch (InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration", e);
}
try {
dfs.addCacheDirective(builder
.setExpiration(Expiration.newAbsolute(
new Date().getTime() + poolExpiration + (10*1000)))
.build());
fail("Added a directive that exceeds pool's max relative expiration");
} catch (InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration", e);
}
// Test that max is enforced on modify for relative and absolute Expirations
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
.setId(listInfo.getId())
.setExpiration(Expiration.newRelative(poolExpiration+1))
.build());
fail("Modified a directive to exceed pool's max relative expiration");
} catch (InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration", e);
}
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
.setId(listInfo.getId())
.setExpiration(Expiration.newAbsolute(
new Date().getTime() + poolExpiration + (10*1000)))
.build());
fail("Modified a directive to exceed pool's max relative expiration");
} catch (InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration", e);
}
// Test some giant limit values with add
try {
dfs.addCacheDirective(builder
.setExpiration(Expiration.newRelative(
Long.MAX_VALUE))
.build());
fail("Added a directive with a gigantic max value");
} catch (IllegalArgumentException e) {
assertExceptionContains("is too far in the future", e);
}
try {
dfs.addCacheDirective(builder
.setExpiration(Expiration.newAbsolute(
Long.MAX_VALUE))
.build());
fail("Added a directive with a gigantic max value");
} catch (InvalidRequestException e) {
assertExceptionContains("is too far in the future", e);
}
// Test some giant limit values with modify
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
.setId(listInfo.getId())
.setExpiration(Expiration.NEVER)
.build());
fail("Modified a directive to exceed pool's max relative expiration");
} catch (InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration", e);
}
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
.setId(listInfo.getId())
.setExpiration(Expiration.newAbsolute(
Long.MAX_VALUE))
.build());
fail("Modified a directive to exceed pool's max relative expiration");
} catch (InvalidRequestException e) {
assertExceptionContains("is too far in the future", e);
}
// Test that the max is enforced on modify correctly when changing pools
CachePoolInfo destPool = new CachePoolInfo("destPool");
dfs.addCachePool(destPool.setMaxRelativeExpiryMs(poolExpiration / 2));
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
.setId(listInfo.getId())
.setPool(destPool.getPoolName())
.build());
fail("Modified a directive to a pool with a lower max expiration");
} catch (InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration", e);
}
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
.setId(listInfo.getId())
.setPool(destPool.getPoolName())
.setExpiration(Expiration.newRelative(poolExpiration / 2))
.build());
dirIt = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder()
.setPool(destPool.getPoolName())
.build());
listInfo = dirIt.next().getInfo();
listExpiration = listInfo.getExpiration().getAbsoluteMillis()
- new Date().getTime();
assertTrue("Unexpected relative expiry " + listExpiration
+ " expected approximately " + poolExpiration/2,
Math.abs(poolExpiration/2 - listExpiration) < 10*1000);
// Test that cache pool and directive expiry can be modified back to never
dfs.modifyCachePool(destPool
.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER));
poolIt = dfs.listCachePools();
listPool = poolIt.next().getInfo();
while (!listPool.getPoolName().equals(destPool.getPoolName())) {
listPool = poolIt.next().getInfo();
}
assertEquals("Expected max relative expiry to match set value",
CachePoolInfo.RELATIVE_EXPIRY_NEVER,
listPool.getMaxRelativeExpiryMs().longValue());
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
.setId(listInfo.getId())
.setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER))
.build());
// Test modifying close to the limit
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
.setId(listInfo.getId())
.setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER - 1))
.build());
}
/**
* Check that the NameNode is not attempting to cache anything.
*/
private void checkPendingCachedEmpty(MiniDFSCluster cluster)
throws Exception {
cluster.getNamesystem().readLock();
try {
final DatanodeManager datanodeManager =
cluster.getNamesystem().getBlockManager().getDatanodeManager();
for (DataNode dn : cluster.getDataNodes()) {
DatanodeDescriptor descriptor =
datanodeManager.getDatanode(dn.getDatanodeId());
Assert.assertTrue("Pending cached list of " + descriptor +
" is not empty, "
+ Arrays.toString(descriptor.getPendingCached().toArray()),
descriptor.getPendingCached().isEmpty());
}
} finally {
cluster.getNamesystem().readUnlock();
}
}
@Test(timeout=60000)
public void testExceedsCapacity() throws Exception {
// Create a giant file
final Path fileName = new Path("/exceeds");
final long fileLen = CACHE_CAPACITY * (NUM_DATANODES*2);
int numCachedReplicas = (int) ((CACHE_CAPACITY*NUM_DATANODES)/BLOCK_SIZE);
DFSTestUtil.createFile(dfs, fileName, fileLen, (short) NUM_DATANODES,
0xFADED);
dfs.addCachePool(new CachePoolInfo("pool"));
dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
.setPath(fileName).setReplication((short) 1).build());
waitForCachedBlocks(namenode, -1, numCachedReplicas,
"testExceeds:1");
checkPendingCachedEmpty(cluster);
Thread.sleep(1000);
checkPendingCachedEmpty(cluster);
// Try creating a file with giant-sized blocks that exceed cache capacity
dfs.delete(fileName, false);
DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
(short) 1, 0xFADED);
checkPendingCachedEmpty(cluster);
Thread.sleep(1000);
checkPendingCachedEmpty(cluster);
}
@Test(timeout=60000)
public void testNoBackingReplica() throws Exception {
// Cache all three replicas for a file.
final Path filename = new Path("/noback");
final short replication = (short) 3;
DFSTestUtil.createFile(dfs, filename, 1, replication, 0x0BAC);
dfs.addCachePool(new CachePoolInfo("pool"));
dfs.addCacheDirective(
new CacheDirectiveInfo.Builder().setPool("pool").setPath(filename)
.setReplication(replication).build());
waitForCachedBlocks(namenode, 1, replication, "testNoBackingReplica:1");
// Pause cache reports while we change the replication factor.
// This will orphan some cached replicas.
DataNodeTestUtils.setCacheReportsDisabledForTests(cluster, true);
try {
dfs.setReplication(filename, (short) 1);
DFSTestUtil.waitForReplication(dfs, filename, (short) 1, 30000);
// The cache locations should drop down to 1 even without cache reports.
waitForCachedBlocks(namenode, 1, (short) 1, "testNoBackingReplica:2");
} finally {
DataNodeTestUtils.setCacheReportsDisabledForTests(cluster, false);
}
}
}
| 59,597 | 39.214575 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.net.BindException;
import java.util.Random;
/**
* This class tests the validation of the configuration object when passed
* to the NameNode
*/
public class TestValidateConfigurationSettings {
@After
public void cleanUp() {
FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
}
/**
   * Tests that setting the rpc port to the same value as the web port
   * throws an exception when trying to re-use the same port
*/
@Test(expected = BindException.class, timeout = 300000)
public void testThatMatchingRPCandHttpPortsThrowException()
throws IOException {
NameNode nameNode = null;
try {
Configuration conf = new HdfsConfiguration();
File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
nameDir.getAbsolutePath());
Random rand = new Random();
final int port = 30000 + rand.nextInt(30000);
// set both of these to the same port. It should fail.
FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port);
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port);
DFSTestUtil.formatNameNode(conf);
nameNode = new NameNode(conf);
} finally {
if (nameNode != null) {
nameNode.stop();
}
}
}
/**
   * Tests that setting the rpc port to a different value than the web port
   * does NOT throw an exception
*/
@Test(timeout = 300000)
public void testThatDifferentRPCandHttpPortsAreOK()
throws IOException {
Configuration conf = new HdfsConfiguration();
File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
nameDir.getAbsolutePath());
Random rand = new Random();
// A few retries in case the ports we choose are in use.
for (int i = 0; i < 5; ++i) {
final int port1 = 30000 + rand.nextInt(10000);
final int port2 = port1 + 1 + rand.nextInt(10000);
FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port1);
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port2);
DFSTestUtil.formatNameNode(conf);
NameNode nameNode = null;
try {
nameNode = new NameNode(conf); // should be OK!
break;
} catch(BindException be) {
continue; // Port in use? Try another.
} finally {
if (nameNode != null) {
nameNode.stop();
}
}
}
}
/**
* HDFS-3013: NameNode format command doesn't pick up
* dfs.namenode.name.dir.NameServiceId configuration.
*/
@Test(timeout = 300000)
public void testGenericKeysForNameNodeFormat()
throws IOException {
Configuration conf = new HdfsConfiguration();
// Set ephemeral ports
conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
"127.0.0.1:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
"127.0.0.1:0");
conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
// Set a nameservice-specific configuration for name dir
File dir = new File(MiniDFSCluster.getBaseDirectory(),
"testGenericKeysForNameNodeFormat");
if (dir.exists()) {
FileUtil.fullyDelete(dir);
}
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + ".ns1",
dir.getAbsolutePath());
// Format and verify the right dir is formatted.
DFSTestUtil.formatNameNode(conf);
GenericTestUtils.assertExists(dir);
// Ensure that the same dir is picked up by the running NN
NameNode nameNode = new NameNode(conf);
nameNode.stop();
}
}
| 4,961 | 31.860927 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.net.StaticMapping;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.util.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestBlockPlacementPolicyRackFaultTolerant {
private static final int DEFAULT_BLOCK_SIZE = 1024;
private MiniDFSCluster cluster = null;
private NamenodeProtocols nameNodeRpc = null;
private FSNamesystem namesystem = null;
private PermissionStatus perm = null;
@Before
public void setup() throws IOException {
StaticMapping.resetMap();
Configuration conf = new HdfsConfiguration();
final ArrayList<String> rackList = new ArrayList<String>();
final ArrayList<String> hostList = new ArrayList<String>();
for (int i = 0; i < 10; i++) {
for (int j = 0; j < 2; j++) {
rackList.add("/rack" + i);
hostList.add("/host" + i + j);
}
}
conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
BlockPlacementPolicyRackFaultTolerant.class,
BlockPlacementPolicy.class);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(hostList.size())
.racks(rackList.toArray(new String[rackList.size()]))
.hosts(hostList.toArray(new String[hostList.size()]))
.build();
cluster.waitActive();
nameNodeRpc = cluster.getNameNodeRpc();
namesystem = cluster.getNamesystem();
perm = new PermissionStatus("TestBlockPlacementPolicyEC", null,
FsPermission.getDefault());
}
@After
public void teardown() {
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testChooseTarget() throws Exception {
doTestChooseTargetNormalCase();
doTestChooseTargetSpecialCase();
}
private void doTestChooseTargetNormalCase() throws Exception {
String clientMachine = "client.foo.com";
short[][] testSuite = {
{3, 2}, {3, 7}, {3, 8}, {3, 10}, {9, 1}, {10, 1}, {10, 6}, {11, 6},
{11, 9}
};
    // Run the whole test suite 5 times
int fileCount = 0;
for (int i = 0; i < 5; i++) {
for (short[] testCase : testSuite) {
short replication = testCase[0];
short additionalReplication = testCase[1];
String src = "/testfile" + (fileCount++);
// Create the file with client machine
HdfsFileStatus fileStatus = namesystem.startFile(src, perm,
clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
replication, DEFAULT_BLOCK_SIZE, null, false);
//test chooseTarget for new file
LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
null, null, fileStatus.getFileId(), null);
doTestLocatedBlock(replication, locatedBlock);
//test chooseTarget for existing file.
LocatedBlock additionalLocatedBlock =
nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(),
locatedBlock.getBlock(), locatedBlock.getLocations(),
locatedBlock.getStorageIDs(), new DatanodeInfo[0],
additionalReplication, clientMachine);
doTestLocatedBlock(replication + additionalReplication, additionalLocatedBlock);
}
}
}
/**
   * Test more random combinations so that special cases are covered.
   * For example, when some racks already have 2 replicas while other racks
   * have none, the racks with no replicas should be chosen.
*/
private void doTestChooseTargetSpecialCase() throws Exception {
String clientMachine = "client.foo.com";
    // Create a single file with a large replication factor
String src = "/testfile_1_";
// Create the file with client machine
HdfsFileStatus fileStatus = namesystem.startFile(src, perm,
clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
(short) 20, DEFAULT_BLOCK_SIZE, null, false);
//test chooseTarget for new file
LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
null, null, fileStatus.getFileId(), null);
doTestLocatedBlock(20, locatedBlock);
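    // With 10 racks and 20 requested replicas, a balanced placement ends up
    // with exactly 2 replicas on each rack.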
DatanodeInfo[] locs = locatedBlock.getLocations();
String[] storageIDs = locatedBlock.getStorageIDs();
for (int time = 0; time < 5; time++) {
shuffle(locs, storageIDs);
for (int i = 1; i < locs.length; i++) {
DatanodeInfo[] partLocs = new DatanodeInfo[i];
String[] partStorageIDs = new String[i];
System.arraycopy(locs, 0, partLocs, 0, i);
System.arraycopy(storageIDs, 0, partStorageIDs, 0, i);
for (int j = 1; j < 20 - i; j++) {
LocatedBlock additionalLocatedBlock =
nameNodeRpc.getAdditionalDatanode(src, fileStatus.getFileId(),
locatedBlock.getBlock(), partLocs,
partStorageIDs, new DatanodeInfo[0],
j, clientMachine);
doTestLocatedBlock(i + j, additionalLocatedBlock);
}
}
}
}
private void shuffle(DatanodeInfo[] locs, String[] storageIDs) {
int length = locs.length;
Object[][] pairs = new Object[length][];
for (int i = 0; i < length; i++) {
pairs[i] = new Object[]{locs[i], storageIDs[i]};
}
DFSUtil.shuffle(pairs);
for (int i = 0; i < length; i++) {
locs[i] = (DatanodeInfo) pairs[i][0];
storageIDs[i] = (String) pairs[i][1];
}
}
private void doTestLocatedBlock(int replication, LocatedBlock locatedBlock) {
assertEquals(replication, locatedBlock.getLocations().length);
HashMap<String, Integer> racksCount = new HashMap<String, Integer>();
for (DatanodeInfo node :
locatedBlock.getLocations()) {
addToRacksCount(node.getNetworkLocation(), racksCount);
}
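    // Replicas must be spread as evenly as possible across racks: the number
    // of replicas on any two racks may differ by at most one.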
int minCount = Integer.MAX_VALUE;
int maxCount = Integer.MIN_VALUE;
for (Integer rackCount : racksCount.values()) {
minCount = Math.min(minCount, rackCount);
maxCount = Math.max(maxCount, rackCount);
}
assertTrue(maxCount - minCount <= 1);
}
private void addToRacksCount(String rack, HashMap<String, Integer> racksCount) {
Integer count = racksCount.get(rack);
if (count == null) {
racksCount.put(rack, 1);
} else {
racksCount.put(rack, count + 1);
}
}
}
| 7,978 | 36.995238 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.*;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
import java.util.regex.Pattern;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.web.HftpFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.PathUtils;
import org.apache.log4j.Appender;
import org.apache.log4j.AsyncAppender;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.RollingFileAppender;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
/**
* A JUnit test that audit logs are generated
*/
@RunWith(Parameterized.class)
public class TestAuditLogs {
static final String auditLogFile = PathUtils.getTestDirName(TestAuditLogs.class) + "/TestAuditLogs-audit.log";
final boolean useAsyncLog;
@Parameters
public static Collection<Object[]> data() {
Collection<Object[]> params = new ArrayList<Object[]>();
    params.add(new Object[]{Boolean.FALSE});
    params.add(new Object[]{Boolean.TRUE});
return params;
}
public TestAuditLogs(boolean useAsyncLog) {
this.useAsyncLog = useAsyncLog;
}
// Pattern for:
// allowed=(true|false) ugi=name ip=/address cmd={cmd} src={path} dst=null perm=null
static final Pattern auditPattern = Pattern.compile(
"allowed=.*?\\s" +
"ugi=.*?\\s" +
"ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" +
"cmd=.*?\\ssrc=.*?\\sdst=null\\s" +
"perm=.*?");
static final Pattern successPattern = Pattern.compile(
".*allowed=true.*");
static final Pattern webOpenPattern = Pattern.compile(
".*cmd=open.*proto=webhdfs.*");
static final String username = "bob";
static final String[] groups = { "group1" };
static final String fileName = "/srcdat";
DFSTestUtil util;
MiniDFSCluster cluster;
FileSystem fs;
String fnames[];
Configuration conf;
UserGroupInformation userGroupInfo;
@Before
public void setupCluster() throws Exception {
// must configure prior to instantiating the namesystem because it
// will reconfigure the logger if async is enabled
configureAuditLogs();
conf = new HdfsConfiguration();
final long precision = 1L;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, useAsyncLog);
util = new DFSTestUtil.Builder().setName("TestAuditAllowed").
setNumFiles(20).build();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
fs = cluster.getFileSystem();
util.createFiles(fs, fileName);
// make sure the appender is what it's supposed to be
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
@SuppressWarnings("unchecked")
List<Appender> appenders = Collections.list(logger.getAllAppenders());
assertEquals(1, appenders.size());
assertEquals(useAsyncLog, appenders.get(0) instanceof AsyncAppender);
fnames = util.getFileNames(fileName);
util.waitReplication(fs, fileName, (short)3);
userGroupInfo = UserGroupInformation.createUserForTesting(username, groups);
}
@After
public void teardownCluster() throws Exception {
util.cleanup(fs, "/srcdat");
fs.close();
cluster.shutdown();
}
/** test that allowed operation puts proper entry in audit log */
@Test
public void testAuditAllowed() throws Exception {
final Path file = new Path(fnames[0]);
FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
setupAuditLogs();
InputStream istream = userfs.open(file);
int val = istream.read();
istream.close();
verifyAuditLogs(true);
assertTrue("failed to read from file", val >= 0);
}
/** test that allowed stat puts proper entry in audit log */
@Test
public void testAuditAllowedStat() throws Exception {
final Path file = new Path(fnames[0]);
FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
setupAuditLogs();
FileStatus st = userfs.getFileStatus(file);
verifyAuditLogs(true);
assertTrue("failed to stat file", st != null && st.isFile());
}
/** test that denied operation puts proper entry in audit log */
@Test
public void testAuditDenied() throws Exception {
final Path file = new Path(fnames[0]);
FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
fs.setPermission(file, new FsPermission((short)0600));
fs.setOwner(file, "root", null);
setupAuditLogs();
try {
userfs.open(file);
fail("open must not succeed");
} catch(AccessControlException e) {
System.out.println("got access denied, as expected.");
}
verifyAuditLogs(false);
}
/** test that access via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfs() throws Exception {
final Path file = new Path(fnames[0]);
fs.setPermission(file, new FsPermission((short)0644));
fs.setOwner(file, "root", null);
setupAuditLogs();
WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsConstants.WEBHDFS_SCHEME);
InputStream istream = webfs.open(file);
int val = istream.read();
istream.close();
verifyAuditLogsRepeat(true, 3);
assertTrue("failed to read from file", val >= 0);
}
/** test that stat via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsStat() throws Exception {
final Path file = new Path(fnames[0]);
fs.setPermission(file, new FsPermission((short)0644));
fs.setOwner(file, "root", null);
setupAuditLogs();
WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsConstants.WEBHDFS_SCHEME);
FileStatus st = webfs.getFileStatus(file);
verifyAuditLogs(true);
assertTrue("failed to stat file", st != null && st.isFile());
}
/** test that access via Hftp puts proper entry in audit log */
@Test
public void testAuditHftp() throws Exception {
final Path file = new Path(fnames[0]);
final String hftpUri =
"hftp://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
HftpFileSystem hftpFs = null;
setupAuditLogs();
try {
hftpFs = (HftpFileSystem) new Path(hftpUri).getFileSystem(conf);
InputStream istream = hftpFs.open(file);
@SuppressWarnings("unused")
int val = istream.read();
istream.close();
verifyAuditLogs(true);
} finally {
if (hftpFs != null) hftpFs.close();
}
}
/** test that denied access via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsDenied() throws Exception {
final Path file = new Path(fnames[0]);
fs.setPermission(file, new FsPermission((short)0600));
fs.setOwner(file, "root", null);
setupAuditLogs();
try {
WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsConstants.WEBHDFS_SCHEME);
InputStream istream = webfs.open(file);
int val = istream.read();
fail("open+read must not succeed, got " + val);
} catch(AccessControlException E) {
System.out.println("got access denied, as expected.");
}
verifyAuditLogsRepeat(false, 2);
}
/** test that open via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsOpen() throws Exception {
final Path file = new Path(fnames[0]);
fs.setPermission(file, new FsPermission((short)0644));
fs.setOwner(file, "root", null);
setupAuditLogs();
WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsConstants.WEBHDFS_SCHEME);
webfs.open(file);
verifyAuditLogsCheckPattern(true, 3, webOpenPattern);
}
  /** Sets up log4j logger for audit logs */
private void setupAuditLogs() throws IOException {
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
// enable logging now that the test is ready to run
logger.setLevel(Level.INFO);
}
private void configureAuditLogs() throws IOException {
// Shutdown the LogManager to release all logger open file handles.
// Unfortunately, Apache commons logging library does not provide
// means to release underlying loggers. For additional info look up
// commons library FAQ.
LogManager.shutdown();
File file = new File(auditLogFile);
if (file.exists()) {
assertTrue(file.delete());
}
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
// disable logging while the cluster startup preps files
logger.setLevel(Level.OFF);
PatternLayout layout = new PatternLayout("%m%n");
RollingFileAppender appender = new RollingFileAppender(layout, auditLogFile);
logger.addAppender(appender);
}
// Ensure audit log has only one entry
private void verifyAuditLogs(boolean expectSuccess) throws IOException {
verifyAuditLogsRepeat(expectSuccess, 1);
}
// Ensure audit log has exactly N entries
private void verifyAuditLogsRepeat(boolean expectSuccess, int ndupe)
throws IOException {
// Turn off the logs
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
logger.setLevel(Level.OFF);
// Close the appenders and force all logs to be flushed
Enumeration<?> appenders = logger.getAllAppenders();
while (appenders.hasMoreElements()) {
Appender appender = (Appender)appenders.nextElement();
appender.close();
}
BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
String line = null;
boolean ret = true;
try {
for (int i = 0; i < ndupe; i++) {
line = reader.readLine();
assertNotNull(line);
assertTrue("Expected audit event not found in audit log",
auditPattern.matcher(line).matches());
ret &= successPattern.matcher(line).matches();
}
assertNull("Unexpected event in audit log", reader.readLine());
assertTrue("Expected success=" + expectSuccess, ret == expectSuccess);
} finally {
reader.close();
}
}
  // Ensure audit log has exactly N entries and that at least one of them
  // matches the given pattern
private void verifyAuditLogsCheckPattern(boolean expectSuccess, int ndupe, Pattern pattern)
throws IOException {
// Turn off the logs
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
logger.setLevel(Level.OFF);
// Close the appenders and force all logs to be flushed
Enumeration<?> appenders = logger.getAllAppenders();
while (appenders.hasMoreElements()) {
Appender appender = (Appender)appenders.nextElement();
appender.close();
}
BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
String line = null;
boolean ret = true;
boolean patternMatches = false;
try {
for (int i = 0; i < ndupe; i++) {
line = reader.readLine();
assertNotNull(line);
patternMatches |= pattern.matcher(line).matches();
ret &= successPattern.matcher(line).matches();
}
assertNull("Unexpected event in audit log", reader.readLine());
assertTrue("Expected audit event not found in audit log", patternMatches);
assertTrue("Expected success=" + expectSuccess, ret == expectSuccess);
} finally {
reader.close();
}
}
}
| 13,462 | 33.788114 | 125 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/AclTestHelpers.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.*;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
/**
* Helper methods useful for writing ACL tests.
*/
public final class AclTestHelpers {
/**
* Create a new AclEntry with scope, type and permission (no name).
*
* @param scope AclEntryScope scope of the ACL entry
* @param type AclEntryType ACL entry type
* @param permission FsAction set of permissions in the ACL entry
* @return AclEntry new AclEntry
*/
public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
FsAction permission) {
return new AclEntry.Builder()
.setScope(scope)
.setType(type)
.setPermission(permission)
.build();
}
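  // Illustrative usage (hypothetical snippet, not taken from any test here):
  //   AclEntry entry = aclEntry(AclEntryScope.ACCESS, AclEntryType.USER, FsAction.ALL);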
/**
* Create a new AclEntry with scope, type, name and permission.
*
* @param scope AclEntryScope scope of the ACL entry
* @param type AclEntryType ACL entry type
* @param name String optional ACL entry name
* @param permission FsAction set of permissions in the ACL entry
* @return AclEntry new AclEntry
*/
public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
String name, FsAction permission) {
return new AclEntry.Builder()
.setScope(scope)
.setType(type)
.setName(name)
.setPermission(permission)
.build();
}
/**
* Create a new AclEntry with scope, type and name (no permission).
*
* @param scope AclEntryScope scope of the ACL entry
* @param type AclEntryType ACL entry type
* @param name String optional ACL entry name
* @return AclEntry new AclEntry
*/
public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
String name) {
return new AclEntry.Builder()
.setScope(scope)
.setType(type)
.setName(name)
.build();
}
/**
* Create a new AclEntry with scope and type (no name or permission).
*
* @param scope AclEntryScope scope of the ACL entry
* @param type AclEntryType ACL entry type
* @return AclEntry new AclEntry
*/
public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type) {
return new AclEntry.Builder()
.setScope(scope)
.setType(type)
.build();
}
/**
* Asserts that permission is denied to the given fs/user for the given file.
*
* @param fs FileSystem to check
* @param user UserGroupInformation owner of fs
* @param pathToCheck Path file to check
* @throws Exception if there is an unexpected error
*/
public static void assertFilePermissionDenied(FileSystem fs,
UserGroupInformation user, Path pathToCheck) throws Exception {
try {
DFSTestUtil.readFileBuffer(fs, pathToCheck);
fail("expected AccessControlException for user " + user + ", path = " +
pathToCheck);
} catch (AccessControlException e) {
// expected
}
}
/**
* Asserts that permission is granted to the given fs/user for the given file.
*
* @param fs FileSystem to check
* @param user UserGroupInformation owner of fs
* @param pathToCheck Path file to check
* @throws Exception if there is an unexpected error
*/
public static void assertFilePermissionGranted(FileSystem fs,
UserGroupInformation user, Path pathToCheck) throws Exception {
try {
DFSTestUtil.readFileBuffer(fs, pathToCheck);
} catch (AccessControlException e) {
fail("expected permission granted for user " + user + ", path = " +
pathToCheck);
}
}
/**
* Asserts the value of the FsPermission bits on the inode of a specific path.
*
* @param fs FileSystem to use for check
* @param pathToCheck Path inode to check
* @param perm short expected permission bits
* @throws IOException thrown if there is an I/O error
*/
public static void assertPermission(FileSystem fs, Path pathToCheck,
short perm) throws IOException {
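    // 01777 (octal) keeps only the standard permission bits (rwx bits plus the
    // sticky bit); bit 12 of the expected value encodes whether the ACL bit is set.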
short filteredPerm = (short)(perm & 01777);
FsPermission fsPermission = fs.getFileStatus(pathToCheck).getPermission();
assertEquals(filteredPerm, fsPermission.toShort());
assertEquals(((perm & (1 << 12)) != 0), fsPermission.getAclBit());
}
}
| 5,443 | 33.025 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeduplicationMap.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext.DeduplicationMap;
import org.junit.Assert;
import org.junit.Test;
public class TestDeduplicationMap {
@Test
public void testDeduplicationMap() {
DeduplicationMap<String> m = DeduplicationMap.newMap();
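    // Ids are handed out in insertion order starting at 1, and repeated keys
    // map back to the same id.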
Assert.assertEquals(1, m.getId("1"));
Assert.assertEquals(2, m.getId("2"));
Assert.assertEquals(3, m.getId("3"));
Assert.assertEquals(1, m.getId("1"));
Assert.assertEquals(2, m.getId("2"));
Assert.assertEquals(3, m.getId("3"));
}
}
| 1,406 | 37.027027 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
import org.apache.hadoop.ipc.RemoteException;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestDiskspaceQuotaUpdate {
private static final int BLOCKSIZE = 1024;
private static final short REPLICATION = 4;
static final long seed = 0L;
private static final Path dir = new Path("/TestQuotaUpdate");
private Configuration conf;
private MiniDFSCluster cluster;
private FSDirectory fsdir;
private DistributedFileSystem dfs;
@Before
public void setUp() throws Exception {
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
fsdir = cluster.getNamesystem().getFSDirectory();
dfs = cluster.getFileSystem();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Test if the quota can be correctly updated for create file
*/
@Test (timeout=60000)
public void testQuotaUpdateWithFileCreate() throws Exception {
final Path foo = new Path(dir, "foo");
Path createdFile = new Path(foo, "created_file.data");
dfs.mkdirs(foo);
dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;
DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
fileLen, BLOCKSIZE, REPLICATION, seed);
INode fnode = fsdir.getINode4Write(foo.toString());
assertTrue(fnode.isDirectory());
assertTrue(fnode.isQuotaSet());
QuotaCounts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed();
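    // Namespace usage counts the foo directory plus the created file; storage
    // usage is the file length multiplied by the replication factor.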
assertEquals(2, cnt.getNameSpace());
assertEquals(fileLen * REPLICATION, cnt.getStorageSpace());
}
/**
* Test if the quota can be correctly updated for append
*/
@Test (timeout=60000)
public void testUpdateQuotaForAppend() throws Exception {
final Path foo = new Path(dir ,"foo");
final Path bar = new Path(foo, "bar");
long currentFileLen = BLOCKSIZE;
DFSTestUtil.createFile(dfs, bar, currentFileLen, REPLICATION, seed);
dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
// append half of the block data, the previous file length is at block
// boundary
DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE / 2);
currentFileLen += (BLOCKSIZE / 2);
INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
assertTrue(fooNode.isQuotaSet());
QuotaCounts quota = fooNode.getDirectoryWithQuotaFeature()
.getSpaceConsumed();
long ns = quota.getNameSpace();
long ds = quota.getStorageSpace();
assertEquals(2, ns); // foo and bar
assertEquals(currentFileLen * REPLICATION, ds);
ContentSummary c = dfs.getContentSummary(foo);
assertEquals(c.getSpaceConsumed(), ds);
// append another block, the previous file length is not at block boundary
DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);
currentFileLen += BLOCKSIZE;
quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
ns = quota.getNameSpace();
ds = quota.getStorageSpace();
assertEquals(2, ns); // foo and bar
assertEquals(currentFileLen * REPLICATION, ds);
c = dfs.getContentSummary(foo);
assertEquals(c.getSpaceConsumed(), ds);
// append several blocks
DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE * 3 + BLOCKSIZE / 8);
currentFileLen += (BLOCKSIZE * 3 + BLOCKSIZE / 8);
quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
ns = quota.getNameSpace();
ds = quota.getStorageSpace();
assertEquals(2, ns); // foo and bar
assertEquals(currentFileLen * REPLICATION, ds);
c = dfs.getContentSummary(foo);
assertEquals(c.getSpaceConsumed(), ds);
}
/**
* Test if the quota can be correctly updated when file length is updated
* through fsync
*/
@Test (timeout=60000)
public void testUpdateQuotaForFSync() throws Exception {
final Path foo = new Path("/foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(dfs, bar, BLOCKSIZE, REPLICATION, 0L);
dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
FSDataOutputStream out = dfs.append(bar);
out.write(new byte[BLOCKSIZE / 4]);
((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
QuotaCounts quota = fooNode.getDirectoryWithQuotaFeature()
.getSpaceConsumed();
long ns = quota.getNameSpace();
long ds = quota.getStorageSpace();
assertEquals(2, ns); // foo and bar
assertEquals(BLOCKSIZE * 2 * REPLICATION, ds); // file is under construction
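    // While the file is under construction, the partially written last block is
    // charged at a full block size, which is why two full blocks are counted.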
out.write(new byte[BLOCKSIZE / 4]);
out.close();
fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
ns = quota.getNameSpace();
ds = quota.getStorageSpace();
assertEquals(2, ns);
assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);
// append another block
DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);
quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
ns = quota.getNameSpace();
ds = quota.getStorageSpace();
assertEquals(2, ns); // foo and bar
assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
}
/**
* Test append over storage quota does not mark file as UC or create lease
*/
@Test (timeout=60000)
public void testAppendOverStorageQuota() throws Exception {
final Path dir = new Path("/TestAppendOverQuota");
final Path file = new Path(dir, "file");
// create partial block file
dfs.mkdirs(dir);
DFSTestUtil.createFile(dfs, file, BLOCKSIZE/2, REPLICATION, seed);
// lower quota to cause exception when appending to partial block
dfs.setQuota(dir, Long.MAX_VALUE - 1, 1);
final INodeDirectory dirNode = fsdir.getINode4Write(dir.toString())
.asDirectory();
final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
.getSpaceConsumed().getStorageSpace();
try {
DFSTestUtil.appendFile(dfs, file, BLOCKSIZE);
Assert.fail("append didn't fail");
} catch (DSQuotaExceededException e) {
// ignore
}
LeaseManager lm = cluster.getNamesystem().getLeaseManager();
// check that the file exists, isn't UC, and has no dangling lease
INodeFile inode = fsdir.getINode(file.toString()).asFile();
Assert.assertNotNull(inode);
Assert.assertFalse("should not be UC", inode.isUnderConstruction());
Assert.assertNull("should not have a lease", lm.getLease(inode));
// make sure the quota usage is unchanged
final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
.getSpaceConsumed().getStorageSpace();
assertEquals(spaceUsed, newSpaceUsed);
// make sure edits aren't corrupted
dfs.recoverLease(file);
cluster.restartNameNodes();
}
/**
* Test append over a specific type of storage quota does not mark file as
* UC or create a lease
*/
@Test (timeout=60000)
public void testAppendOverTypeQuota() throws Exception {
final Path dir = new Path("/TestAppendOverTypeQuota");
final Path file = new Path(dir, "file");
// create partial block file
dfs.mkdirs(dir);
// set the storage policy on dir
dfs.setStoragePolicy(dir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
DFSTestUtil.createFile(dfs, file, BLOCKSIZE/2, REPLICATION, seed);
// set quota of SSD to 1L
dfs.setQuotaByStorageType(dir, StorageType.SSD, 1L);
final INodeDirectory dirNode = fsdir.getINode4Write(dir.toString())
.asDirectory();
final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
.getSpaceConsumed().getStorageSpace();
try {
DFSTestUtil.appendFile(dfs, file, BLOCKSIZE);
Assert.fail("append didn't fail");
} catch (QuotaByStorageTypeExceededException e) {
//ignore
}
// check that the file exists, isn't UC, and has no dangling lease
LeaseManager lm = cluster.getNamesystem().getLeaseManager();
INodeFile inode = fsdir.getINode(file.toString()).asFile();
Assert.assertNotNull(inode);
Assert.assertFalse("should not be UC", inode.isUnderConstruction());
Assert.assertNull("should not have a lease", lm.getLease(inode));
// make sure the quota usage is unchanged
final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
.getSpaceConsumed().getStorageSpace();
assertEquals(spaceUsed, newSpaceUsed);
// make sure edits aren't corrupted
dfs.recoverLease(file);
cluster.restartNameNodes();
}
/**
* Test truncate over quota does not mark file as UC or create a lease
*/
@Test (timeout=60000)
public void testTruncateOverQuota() throws Exception {
final Path dir = new Path("/TestTruncateOverquota");
final Path file = new Path(dir, "file");
// create partial block file
dfs.mkdirs(dir);
DFSTestUtil.createFile(dfs, file, BLOCKSIZE/2, REPLICATION, seed);
// lower quota to cause exception when appending to partial block
dfs.setQuota(dir, Long.MAX_VALUE - 1, 1);
final INodeDirectory dirNode = fsdir.getINode4Write(dir.toString())
.asDirectory();
final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
.getSpaceConsumed().getStorageSpace();
try {
dfs.truncate(file, BLOCKSIZE / 2 - 1);
Assert.fail("truncate didn't fail");
} catch (RemoteException e) {
assertTrue(e.getClassName().contains("DSQuotaExceededException"));
}
// check that the file exists, isn't UC, and has no dangling lease
LeaseManager lm = cluster.getNamesystem().getLeaseManager();
INodeFile inode = fsdir.getINode(file.toString()).asFile();
Assert.assertNotNull(inode);
Assert.assertFalse("should not be UC", inode.isUnderConstruction());
Assert.assertNull("should not have a lease", lm.getLease(inode));
// make sure the quota usage is unchanged
final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
.getSpaceConsumed().getStorageSpace();
assertEquals(spaceUsed, newSpaceUsed);
// make sure edits aren't corrupted
dfs.recoverLease(file);
cluster.restartNameNodes();
}
}
| 12,054 | 37.39172 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.junit.Test;
/**
* This class tests the creation and validation of a checkpoint.
*/
public class TestCheckPointForSecurityTokens {
static final long seed = 0xDEADBEEFL;
static final int blockSize = 4096;
static final int fileSize = 8192;
static final int numDatanodes = 3;
short replication = 3;
MiniDFSCluster cluster = null;
private void cancelToken(Token<DelegationTokenIdentifier> token)
throws IOException {
cluster.getNamesystem().cancelDelegationToken(token);
}
private void renewToken(Token<DelegationTokenIdentifier> token)
throws IOException {
cluster.getNamesystem().renewDelegationToken(token);
}
/**
* Tests save namespace.
*/
@Test
public void testSaveNamespace() throws IOException {
DistributedFileSystem fs = null;
try {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
cluster.waitActive();
fs = cluster.getFileSystem();
FSNamesystem namesystem = cluster.getNamesystem();
String renewer = UserGroupInformation.getLoginUser().getUserName();
Token<DelegationTokenIdentifier> token1 = namesystem
.getDelegationToken(new Text(renewer));
Token<DelegationTokenIdentifier> token2 = namesystem
.getDelegationToken(new Text(renewer));
// Saving image without safe mode should fail
DFSAdmin admin = new DFSAdmin(conf);
String[] args = new String[]{"-saveNamespace"};
// verify that the edits file is NOT empty
NameNode nn = cluster.getNameNode();
for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
assertTrue(log.isInProgress());
log.validateLog();
long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
assertEquals("In-progress log " + log + " should have 5 transactions",
          5, numTransactions);
}
// Saving image in safe mode should succeed
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
try {
admin.run(args);
} catch(Exception e) {
throw new IOException(e.getMessage());
}
// verify that the edits file is empty except for the START txn
for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
assertTrue(log.isInProgress());
log.validateLog();
long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
assertEquals("In-progress log " + log + " should only have START txn",
1, numTransactions);
}
// restart cluster
cluster.shutdown();
cluster = null;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
cluster.waitActive();
//Should be able to renew & cancel the delegation token after cluster restart
try {
renewToken(token1);
renewToken(token2);
} catch (IOException e) {
fail("Could not renew or cancel the token");
}
namesystem = cluster.getNamesystem();
Token<DelegationTokenIdentifier> token3 = namesystem
.getDelegationToken(new Text(renewer));
Token<DelegationTokenIdentifier> token4 = namesystem
.getDelegationToken(new Text(renewer));
// restart cluster again
cluster.shutdown();
cluster = null;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
cluster.waitActive();
namesystem = cluster.getNamesystem();
Token<DelegationTokenIdentifier> token5 = namesystem
.getDelegationToken(new Text(renewer));
try {
renewToken(token1);
renewToken(token2);
renewToken(token3);
renewToken(token4);
renewToken(token5);
} catch (IOException e) {
fail("Could not renew or cancel the token");
}
// restart cluster again
cluster.shutdown();
cluster = null;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
cluster.waitActive();
namesystem = cluster.getNamesystem();
try {
renewToken(token1);
cancelToken(token1);
renewToken(token2);
cancelToken(token2);
renewToken(token3);
cancelToken(token3);
renewToken(token4);
cancelToken(token4);
renewToken(token5);
cancelToken(token5);
} catch (IOException e) {
fail("Could not renew or cancel the token");
}
} finally {
if(fs != null) fs.close();
if(cluster!= null) cluster.shutdown();
}
}
}
| 6,652 | 34.962162 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
|
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.Random;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestFavoredNodesEndToEnd {
{
((Log4JLogger)LogFactory.getLog(BlockPlacementPolicy.class)).getLogger().setLevel(Level.ALL);
}
private static MiniDFSCluster cluster;
private static Configuration conf;
private final static int NUM_DATA_NODES = 10;
private final static int NUM_FILES = 10;
  private final static byte[] SOME_BYTES = "foo".getBytes();
private static DistributedFileSystem dfs;
private static ArrayList<DataNode> datanodes;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
.build();
cluster.waitClusterUp();
dfs = cluster.getFileSystem();
datanodes = cluster.getDataNodes();
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
@Test(timeout=180000)
public void testFavoredNodesEndToEnd() throws Exception {
//create 10 files with random preferred nodes
for (int i = 0; i < NUM_FILES; i++) {
Random rand = new Random(System.currentTimeMillis() + i);
//pass a new created rand so as to get a uniform distribution each time
//without too much collisions (look at the do-while loop in getDatanodes)
InetSocketAddress datanode[] = getDatanodes(rand);
Path p = new Path("/filename"+i);
FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
4096, (short)3, 4096L, null, datanode);
out.write(SOME_BYTES);
out.close();
BlockLocation[] locations = getBlockLocations(p);
//verify the files got created in the right nodes
for (BlockLocation loc : locations) {
String[] hosts = loc.getNames();
String[] hosts1 = getStringForInetSocketAddrs(datanode);
assertTrue(compareNodes(hosts, hosts1));
}
}
}
@Test(timeout=180000)
public void testWhenFavoredNodesNotPresent() throws Exception {
//when we ask for favored nodes but the nodes are not there, we should
//get some other nodes. In other words, the write to hdfs should not fail
//and if we do getBlockLocations on the file, we should see one blklocation
//and three hosts for that
InetSocketAddress arbitraryAddrs[] = new InetSocketAddress[3];
for (int i = 0; i < 3; i++) {
arbitraryAddrs[i] = getArbitraryLocalHostAddr();
}
Path p = new Path("/filename-foo-bar");
FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
4096, (short)3, 4096L, null, arbitraryAddrs);
out.write(SOME_BYTES);
out.close();
getBlockLocations(p);
}
@Test(timeout=180000)
public void testWhenSomeNodesAreNotGood() throws Exception {
// 4 favored nodes
final InetSocketAddress addrs[] = new InetSocketAddress[4];
final String[] hosts = new String[addrs.length];
for (int i = 0; i < addrs.length; i++) {
addrs[i] = datanodes.get(i).getXferAddress();
hosts[i] = addrs[i].getAddress().getHostAddress() + ":" + addrs[i].getPort();
}
//make some datanode not "good" so that even if the client prefers it,
//the namenode would not give it as a replica to write to
DatanodeInfo d = cluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().getDatanodeByXferAddr(
addrs[0].getAddress().getHostAddress(), addrs[0].getPort());
//set the decommission status to true so that
//BlockPlacementPolicyDefault.isGoodTarget returns false for this dn
d.setDecommissioned();
Path p = new Path("/filename-foo-bar-baz");
final short replication = (short)3;
FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
4096, replication, 4096L, null, addrs);
out.write(SOME_BYTES);
out.close();
//reset the state
d.stopDecommission();
BlockLocation[] locations = getBlockLocations(p);
    Assert.assertEquals(replication, locations[0].getNames().length);
//also make sure that the datanode[0] is not in the list of hosts
for (int i = 0; i < replication; i++) {
final String loc = locations[0].getNames()[i];
int j = 0;
for(; j < hosts.length && !loc.equals(hosts[j]); j++);
Assert.assertTrue("j=" + j, j > 0);
Assert.assertTrue("loc=" + loc + " not in host list "
+ Arrays.asList(hosts) + ", j=" + j, j < hosts.length);
}
}
@Test(timeout = 180000)
public void testFavoredNodesEndToEndForAppend() throws Exception {
// create 10 files with random preferred nodes
for (int i = 0; i < NUM_FILES; i++) {
Random rand = new Random(System.currentTimeMillis() + i);
      // pass a newly created rand so as to get a uniform distribution each time
      // without too many collisions (look at the do-while loop in getDatanodes)
InetSocketAddress datanode[] = getDatanodes(rand);
Path p = new Path("/filename" + i);
// create and close the file.
dfs.create(p, FsPermission.getDefault(), true, 4096, (short) 3, 4096L,
null, null).close();
// re-open for append
FSDataOutputStream out = dfs.append(p, EnumSet.of(CreateFlag.APPEND),
4096, null, datanode);
out.write(SOME_BYTES);
out.close();
BlockLocation[] locations = getBlockLocations(p);
// verify the files got created in the right nodes
for (BlockLocation loc : locations) {
String[] hosts = loc.getNames();
String[] hosts1 = getStringForInetSocketAddrs(datanode);
assertTrue(compareNodes(hosts, hosts1));
}
}
}
private BlockLocation[] getBlockLocations(Path p) throws Exception {
DFSTestUtil.waitReplication(dfs, p, (short)3);
BlockLocation[] locations = dfs.getClient().getBlockLocations(
p.toUri().getPath(), 0, Long.MAX_VALUE);
assertTrue(locations.length == 1 && locations[0].getHosts().length == 3);
return locations;
}
private String[] getStringForInetSocketAddrs(InetSocketAddress[] datanode) {
String strs[] = new String[datanode.length];
for (int i = 0; i < datanode.length; i++) {
strs[i] = datanode[i].getAddress().getHostAddress() + ":" +
datanode[i].getPort();
}
return strs;
}
private boolean compareNodes(String[] dnList1, String[] dnList2) {
for (int i = 0; i < dnList1.length; i++) {
boolean matched = false;
for (int j = 0; j < dnList2.length; j++) {
if (dnList1[i].equals(dnList2[j])) {
matched = true;
break;
}
}
      if (!matched) {
fail(dnList1[i] + " not a favored node");
}
}
return true;
}
private InetSocketAddress[] getDatanodes(Random rand) {
//Get some unique random indexes
int idx1 = rand.nextInt(NUM_DATA_NODES);
int idx2;
do {
idx2 = rand.nextInt(NUM_DATA_NODES);
} while (idx1 == idx2);
int idx3;
do {
idx3 = rand.nextInt(NUM_DATA_NODES);
} while (idx2 == idx3 || idx1 == idx3);
InetSocketAddress[] addrs = new InetSocketAddress[3];
addrs[0] = datanodes.get(idx1).getXferAddress();
addrs[1] = datanodes.get(idx2).getXferAddress();
addrs[2] = datanodes.get(idx3).getXferAddress();
return addrs;
}
private InetSocketAddress getArbitraryLocalHostAddr()
throws UnknownHostException{
Random rand = new Random(System.currentTimeMillis());
int port = rand.nextInt(65535);
while (true) {
boolean conflict = false;
for (DataNode d : datanodes) {
if (d.getXferAddress().getPort() == port) {
port = rand.nextInt(65535);
conflict = true;
}
}
      if (!conflict) {
break;
}
}
return new InetSocketAddress(InetAddress.getLocalHost(), port);
}
}
| 9,814 | 36.17803 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
import java.util.ArrayList;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Make sure we correctly update the quota usage for truncate.
* We need to cover the following cases:
* 1. No snapshot, truncate to 0
* 2. No snapshot, truncate at block boundary
* 3. No snapshot, not on block boundary
* 4~6. With snapshot, all the current blocks are included in latest
* snapshots, repeat 1~3
* 7~9. With snapshot, blocks in the latest snapshot and blocks in the current
* file diverged, repeat 1~3
*/
public class TestTruncateQuotaUpdate {
private static final int BLOCKSIZE = 1024;
private static final short REPLICATION = 4;
private long nextMockBlockId;
private long nextMockGenstamp;
private long nextMockINodeId;
@Test
public void testTruncateWithoutSnapshot() {
INodeFile file = createMockFile(BLOCKSIZE * 2 + BLOCKSIZE / 2, REPLICATION);
// case 1: first truncate to 1.5 blocks
    // we truncate 1 block, but not on the boundary, thus the diff should
// be -block + (block - 0.5 block) = -0.5 block
QuotaCounts count = new QuotaCounts.Builder().build();
file.computeQuotaDeltaForTruncate(BLOCKSIZE + BLOCKSIZE / 2, null, count);
Assert.assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace());
// case 2: truncate to 1 block
count = new QuotaCounts.Builder().build();
file.computeQuotaDeltaForTruncate(BLOCKSIZE, null, count);
Assert.assertEquals(-(BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION,
count.getStorageSpace());
// case 3: truncate to 0
count = new QuotaCounts.Builder().build();
file.computeQuotaDeltaForTruncate(0, null, count);
Assert.assertEquals(-(BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION,
count.getStorageSpace());
}
@Test
public void testTruncateWithSnapshotNoDivergence() {
INodeFile file = createMockFile(BLOCKSIZE * 2 + BLOCKSIZE / 2, REPLICATION);
addSnapshotFeature(file, file.getBlocks());
// case 4: truncate to 1.5 blocks
    // all the blocks are in the snapshot. truncate needs to allocate a new block
// diff should be +BLOCKSIZE
QuotaCounts count = new QuotaCounts.Builder().build();
file.computeQuotaDeltaForTruncate(BLOCKSIZE + BLOCKSIZE / 2, null, count);
Assert.assertEquals(BLOCKSIZE * REPLICATION, count.getStorageSpace());
    // case 5: truncate to 1 block
count = new QuotaCounts.Builder().build();
file.computeQuotaDeltaForTruncate(BLOCKSIZE, null, count);
Assert.assertEquals(0, count.getStorageSpace());
    // case 6: truncate to 0
count = new QuotaCounts.Builder().build();
file.computeQuotaDeltaForTruncate(0, null, count);
Assert.assertEquals(0, count.getStorageSpace());
}
@Test
public void testTruncateWithSnapshotAndDivergence() {
INodeFile file = createMockFile(BLOCKSIZE * 2 + BLOCKSIZE / 2, REPLICATION);
    BlockInfo[] blocks = new BlockInfo[file.getBlocks().length];
System.arraycopy(file.getBlocks(), 0, blocks, 0, blocks.length);
addSnapshotFeature(file, blocks);
// Update the last two blocks in the current inode
file.getBlocks()[1] = newBlock(BLOCKSIZE, REPLICATION);
file.getBlocks()[2] = newBlock(BLOCKSIZE / 2, REPLICATION);
// case 7: truncate to 1.5 block
// the block for truncation is not in snapshot, diff should be the same
// as case 1
QuotaCounts count = new QuotaCounts.Builder().build();
file.computeQuotaDeltaForTruncate(BLOCKSIZE + BLOCKSIZE / 2, null, count);
Assert.assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace());
// case 8: truncate to 2 blocks
// the original 2.5 blocks are in snapshot. the block truncated is not
// in snapshot. diff should be -0.5 block
count = new QuotaCounts.Builder().build();
    file.computeQuotaDeltaForTruncate(BLOCKSIZE * 2, null, count);
Assert.assertEquals(-BLOCKSIZE / 2 * REPLICATION, count.getStorageSpace());
// case 9: truncate to 0
count = new QuotaCounts.Builder().build();
file.computeQuotaDeltaForTruncate(0, null, count);
Assert.assertEquals(-(BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, count
.getStorageSpace());
}
private INodeFile createMockFile(long size, short replication) {
ArrayList<BlockInfo> blocks = new ArrayList<>();
long createdSize = 0;
while (createdSize < size) {
long blockSize = Math.min(BLOCKSIZE, size - createdSize);
BlockInfo bi = newBlock(blockSize, replication);
blocks.add(bi);
createdSize += BLOCKSIZE;
}
PermissionStatus perm = new PermissionStatus("foo", "bar", FsPermission
.createImmutable((short) 0x1ff));
return new INodeFile(
++nextMockINodeId, new byte[0], perm, 0, 0,
blocks.toArray(new BlockInfo[blocks.size()]), replication,
BLOCKSIZE);
}
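  // Note: with size = BLOCKSIZE * 2 + BLOCKSIZE / 2 the helper above produces
  // three mock blocks of 1024, 1024 and 512 bytes, which is the "2.5 block"
  // layout that the per-case comments in the tests refer to.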
private BlockInfo newBlock(long size, short replication) {
Block b = new Block(++nextMockBlockId, size, ++nextMockGenstamp);
return new BlockInfoContiguous(b, replication);
}
private static void addSnapshotFeature(INodeFile file, BlockInfo[] blocks) {
FileDiff diff = mock(FileDiff.class);
when(diff.getBlocks()).thenReturn(blocks);
FileDiffList diffList = new FileDiffList();
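    // FileDiffList offers no public mutator suitable for this test, so the
    // private "diffs" field is read via Mockito's Whitebox reflection helper
    // and the mocked FileDiff is appended directly, bypassing the normal
    // snapshot-creation path.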
@SuppressWarnings("unchecked")
    ArrayList<FileDiff> diffs = (ArrayList<FileDiff>) Whitebox.getInternalState(
        diffList, "diffs");
diffs.add(diff);
FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffList);
file.addFeature(sf);
}
}
| 7,016 | 41.017964 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Test AddBlockOp is written and read correctly
*/
public class TestAddBlock {
private static final short REPLICATION = 3;
private static final int BLOCKSIZE = 1024;
private MiniDFSCluster cluster;
private Configuration conf;
@Before
public void setup() throws IOException {
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
}
@After
public void tearDown() {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Test adding new blocks. Restart the NameNode in the test to make sure the
* AddBlockOp in the editlog is applied correctly.
*/
@Test
public void testAddBlock() throws Exception {
DistributedFileSystem fs = cluster.getFileSystem();
final Path file1 = new Path("/file1");
final Path file2 = new Path("/file2");
final Path file3 = new Path("/file3");
final Path file4 = new Path("/file4");
DFSTestUtil.createFile(fs, file1, BLOCKSIZE - 1, REPLICATION, 0L);
DFSTestUtil.createFile(fs, file2, BLOCKSIZE, REPLICATION, 0L);
DFSTestUtil.createFile(fs, file3, BLOCKSIZE * 2 - 1, REPLICATION, 0L);
DFSTestUtil.createFile(fs, file4, BLOCKSIZE * 2, REPLICATION, 0L);
// restart NameNode
cluster.restartNameNode(true);
FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
// check file1
INodeFile file1Node = fsdir.getINode4Write(file1.toString()).asFile();
BlockInfo[] file1Blocks = file1Node.getBlocks();
assertEquals(1, file1Blocks.length);
assertEquals(BLOCKSIZE - 1, file1Blocks[0].getNumBytes());
assertEquals(BlockUCState.COMPLETE, file1Blocks[0].getBlockUCState());
// check file2
INodeFile file2Node = fsdir.getINode4Write(file2.toString()).asFile();
BlockInfo[] file2Blocks = file2Node.getBlocks();
assertEquals(1, file2Blocks.length);
assertEquals(BLOCKSIZE, file2Blocks[0].getNumBytes());
assertEquals(BlockUCState.COMPLETE, file2Blocks[0].getBlockUCState());
// check file3
INodeFile file3Node = fsdir.getINode4Write(file3.toString()).asFile();
BlockInfo[] file3Blocks = file3Node.getBlocks();
assertEquals(2, file3Blocks.length);
assertEquals(BLOCKSIZE, file3Blocks[0].getNumBytes());
assertEquals(BlockUCState.COMPLETE, file3Blocks[0].getBlockUCState());
assertEquals(BLOCKSIZE - 1, file3Blocks[1].getNumBytes());
assertEquals(BlockUCState.COMPLETE, file3Blocks[1].getBlockUCState());
// check file4
INodeFile file4Node = fsdir.getINode4Write(file4.toString()).asFile();
BlockInfo[] file4Blocks = file4Node.getBlocks();
assertEquals(2, file4Blocks.length);
assertEquals(BLOCKSIZE, file4Blocks[0].getNumBytes());
assertEquals(BlockUCState.COMPLETE, file4Blocks[0].getBlockUCState());
assertEquals(BLOCKSIZE, file4Blocks[1].getNumBytes());
assertEquals(BlockUCState.COMPLETE, file4Blocks[1].getBlockUCState());
}
/**
   * Test adding new blocks without closing the corresponding file
*/
@Test
public void testAddBlockUC() throws Exception {
DistributedFileSystem fs = cluster.getFileSystem();
final Path file1 = new Path("/file1");
DFSTestUtil.createFile(fs, file1, BLOCKSIZE - 1, REPLICATION, 0L);
FSDataOutputStream out = null;
try {
// append files without closing the streams
out = fs.append(file1);
String appendContent = "appending-content";
out.writeBytes(appendContent);
((DFSOutputStream) out.getWrappedStream()).hsync(
EnumSet.of(SyncFlag.UPDATE_LENGTH));
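      // SyncFlag.UPDATE_LENGTH asks the NameNode to also persist the new file
      // length, so the length of the under-construction block is durable
      // across the NameNode restart performed below.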
// restart NN
cluster.restartNameNode(true);
FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
INodeFile fileNode = fsdir.getINode4Write(file1.toString()).asFile();
BlockInfo[] fileBlocks = fileNode.getBlocks();
assertEquals(2, fileBlocks.length);
assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
assertEquals(appendContent.length() - 1, fileBlocks[1].getNumBytes());
assertEquals(BlockUCState.UNDER_CONSTRUCTION,
fileBlocks[1].getBlockUCState());
} finally {
if (out != null) {
out.close();
}
}
}
}
| 5,999 | 36.974684 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import java.io.IOException;
import java.util.ArrayList;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
import static org.apache.hadoop.util.Time.now;
import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class TestGetBlockLocations {
private static final String FILE_NAME = "foo";
private static final String FILE_PATH = "/" + FILE_NAME;
private static final long MOCK_INODE_ID = 16386;
private static final String RESERVED_PATH =
"/.reserved/.inodes/" + MOCK_INODE_ID;
@Test(timeout = 30000)
public void testResolveReservedPath() throws IOException {
FSNamesystem fsn = setupFileSystem();
FSEditLog editlog = fsn.getEditLog();
fsn.getBlockLocations("dummy", RESERVED_PATH, 0, 1024);
verify(editlog).logTimes(eq(FILE_PATH), anyLong(), anyLong());
fsn.close();
}
@Test(timeout = 30000)
public void testGetBlockLocationsRacingWithDelete() throws IOException {
FSNamesystem fsn = spy(setupFileSystem());
final FSDirectory fsd = fsn.getFSDirectory();
FSEditLog editlog = fsn.getEditLog();
doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
INodesInPath iip = fsd.getINodesInPath(FILE_PATH, true);
FSDirDeleteOp.delete(fsd, iip, new INode.BlocksMapUpdateInfo(),
new ArrayList<INode>(), new ArrayList<Long>(),
now());
invocation.callRealMethod();
return null;
}
}).when(fsn).writeLock();
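    // Hooking the spy's writeLock() lets the test delete the file immediately
    // before getBlockLocations takes the write lock for its access-time
    // update; the update, and hence the logTimes edit, must then be skipped.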
fsn.getBlockLocations("dummy", RESERVED_PATH, 0, 1024);
verify(editlog, never()).logTimes(anyString(), anyLong(), anyLong());
fsn.close();
}
@Test(timeout = 30000)
public void testGetBlockLocationsRacingWithRename() throws IOException {
FSNamesystem fsn = spy(setupFileSystem());
final FSDirectory fsd = fsn.getFSDirectory();
FSEditLog editlog = fsn.getEditLog();
final String DST_PATH = "/bar";
final boolean[] renamed = new boolean[1];
doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
invocation.callRealMethod();
if (!renamed[0]) {
FSDirRenameOp.renameTo(fsd, fsd.getPermissionChecker(), FILE_PATH,
DST_PATH, new INode.BlocksMapUpdateInfo(),
false);
renamed[0] = true;
}
return null;
}
}).when(fsn).writeLock();
fsn.getBlockLocations("dummy", RESERVED_PATH, 0, 1024);
verify(editlog).logTimes(eq(DST_PATH), anyLong(), anyLong());
fsn.close();
}
private static FSNamesystem setupFileSystem() throws IOException {
Configuration conf = new Configuration();
conf.setLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1L);
FSEditLog editlog = mock(FSEditLog.class);
FSImage image = mock(FSImage.class);
when(image.getEditLog()).thenReturn(editlog);
final FSNamesystem fsn = new FSNamesystem(conf, image, true);
final FSDirectory fsd = fsn.getFSDirectory();
INodesInPath iip = fsd.getINodesInPath("/", true);
PermissionStatus perm = new PermissionStatus(
"hdfs", "supergroup",
FsPermission.createImmutable((short) 0x1ff));
final INodeFile file = new INodeFile(
MOCK_INODE_ID, FILE_NAME.getBytes(Charsets.UTF_8),
perm, 1, 1, new BlockInfo[] {}, (short) 1,
DFS_BLOCK_SIZE_DEFAULT);
fsn.getFSDirectory().addINode(iip, file);
return fsn;
}
}
| 5,163 | 37.251852 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.FileNotFoundException;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
/**
 * Test race between delete and other operations. For now only addBlock()
 * is tested since all the others acquire the FSNamesystem lock for their
 * whole duration.
*/
public class TestDeleteRace {
private static final int BLOCK_SIZE = 4096;
private static final Log LOG = LogFactory.getLog(TestDeleteRace.class);
private static final Configuration conf = new HdfsConfiguration();
private MiniDFSCluster cluster;
@Test
public void testDeleteAddBlockRace() throws Exception {
testDeleteAddBlockRace(false);
}
@Test
public void testDeleteAddBlockRaceWithSnapshot() throws Exception {
testDeleteAddBlockRace(true);
}
private void testDeleteAddBlockRace(boolean hasSnapshot) throws Exception {
try {
conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
SlowBlockPlacementPolicy.class, BlockPlacementPolicy.class);
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
final String fileName = "/testDeleteAddBlockRace";
Path filePath = new Path(fileName);
FSDataOutputStream out = null;
out = fs.create(filePath);
if (hasSnapshot) {
SnapshotTestHelper.createSnapshot((DistributedFileSystem) fs, new Path(
"/"), "s1");
}
Thread deleteThread = new DeleteThread(fs, filePath);
deleteThread.start();
try {
        // write data and sync to make sure a block is allocated.
out.write(new byte[32], 0, 32);
out.hsync();
Assert.fail("Should have failed.");
} catch (FileNotFoundException e) {
GenericTestUtils.assertExceptionContains(filePath.getName(), e);
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
private static class SlowBlockPlacementPolicy extends
BlockPlacementPolicyDefault {
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
int numOfReplicas,
Node writer,
List<DatanodeStorageInfo> chosenNodes,
boolean returnChosenNodes,
Set<Node> excludedNodes,
long blocksize,
final BlockStoragePolicy storagePolicy) {
DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
blocksize, storagePolicy);
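      // Sleeping here, after targets are chosen but before the addBlock call
      // completes, widens the race window so the delete/rename threads in this
      // test can run while a block is being allocated.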
try {
Thread.sleep(3000);
} catch (InterruptedException e) {}
return results;
}
}
private class DeleteThread extends Thread {
private FileSystem fs;
private Path path;
DeleteThread(FileSystem fs, Path path) {
this.fs = fs;
this.path = path;
}
@Override
public void run() {
try {
Thread.sleep(1000);
LOG.info("Deleting" + path);
final FSDirectory fsdir = cluster.getNamesystem().dir;
INode fileINode = fsdir.getINode4Write(path.toString());
INodeMap inodeMap = (INodeMap) Whitebox.getInternalState(fsdir,
"inodeMap");
fs.delete(path, false);
// after deletion, add the inode back to the inodeMap
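        // (re-inserting the inode while it stays unlinked from the directory
        // tree appears intended to exercise the NameNode's deletion check in
        // addBlock, which must not rely on inodeMap presence alone; the writer
        // above still expects FileNotFoundException)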
inodeMap.put(fileINode);
LOG.info("Deleted" + path);
} catch (Exception e) {
LOG.info(e);
}
}
}
private class RenameThread extends Thread {
private FileSystem fs;
private Path from;
private Path to;
RenameThread(FileSystem fs, Path from, Path to) {
this.fs = fs;
this.from = from;
this.to = to;
}
@Override
public void run() {
try {
Thread.sleep(1000);
LOG.info("Renaming " + from + " to " + to);
fs.rename(from, to);
LOG.info("Renamed " + from + " to " + to);
} catch (Exception e) {
LOG.info(e);
}
}
}
@Test
public void testRenameRace() throws Exception {
try {
conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
SlowBlockPlacementPolicy.class, BlockPlacementPolicy.class);
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
Path dirPath1 = new Path("/testRenameRace1");
Path dirPath2 = new Path("/testRenameRace2");
Path filePath = new Path("/testRenameRace1/file1");
fs.mkdirs(dirPath1);
FSDataOutputStream out = fs.create(filePath);
Thread renameThread = new RenameThread(fs, dirPath1, dirPath2);
renameThread.start();
// write data and close to make sure a block is allocated.
out.write(new byte[32], 0, 32);
out.close();
// Restart name node so that it replays edit. If old path was
// logged in edit, it will fail to come up.
cluster.restartNameNode(0);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test race between delete operation and commitBlockSynchronization method.
* See HDFS-6825.
* @param hasSnapshot
* @throws Exception
*/
private void testDeleteAndCommitBlockSynchronizationRace(boolean hasSnapshot)
throws Exception {
LOG.info("Start testing, hasSnapshot: " + hasSnapshot);
ArrayList<AbstractMap.SimpleImmutableEntry<String, Boolean>> testList =
new ArrayList<AbstractMap.SimpleImmutableEntry<String, Boolean>> ();
testList.add(
new AbstractMap.SimpleImmutableEntry<String, Boolean>("/test-file", false));
testList.add(
new AbstractMap.SimpleImmutableEntry<String, Boolean>("/test-file1", true));
testList.add(
new AbstractMap.SimpleImmutableEntry<String, Boolean>(
"/testdir/testdir1/test-file", false));
testList.add(
new AbstractMap.SimpleImmutableEntry<String, Boolean>(
"/testdir/testdir1/test-file1", true));
final Path rootPath = new Path("/");
final Configuration conf = new Configuration();
// Disable permissions so that another user can recover the lease.
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
FSDataOutputStream stm = null;
Map<DataNode, DatanodeProtocolClientSideTranslatorPB> dnMap =
new HashMap<DataNode, DatanodeProtocolClientSideTranslatorPB>();
try {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3)
.build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
int stId = 0;
for(AbstractMap.SimpleImmutableEntry<String, Boolean> stest : testList) {
String testPath = stest.getKey();
Boolean mkSameDir = stest.getValue();
LOG.info("test on " + testPath + " mkSameDir: " + mkSameDir
+ " snapshot: " + hasSnapshot);
Path fPath = new Path(testPath);
//find grandest non-root parent
Path grandestNonRootParent = fPath;
while (!grandestNonRootParent.getParent().equals(rootPath)) {
grandestNonRootParent = grandestNonRootParent.getParent();
}
stm = fs.create(fPath);
LOG.info("test on " + testPath + " created " + fPath);
// write a half block
AppendTestUtil.write(stm, 0, BLOCK_SIZE / 2);
stm.hflush();
if (hasSnapshot) {
SnapshotTestHelper.createSnapshot(fs, rootPath,
"st" + String.valueOf(stId));
++stId;
}
// Look into the block manager on the active node for the block
// under construction.
NameNode nn = cluster.getNameNode();
ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, fPath);
DatanodeDescriptor expectedPrimary =
DFSTestUtil.getExpectedPrimaryNode(nn, blk);
LOG.info("Expecting block recovery to be triggered on DN " +
expectedPrimary);
// Find the corresponding DN daemon, and spy on its connection to the
// active.
DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
DatanodeProtocolClientSideTranslatorPB nnSpy = dnMap.get(primaryDN);
if (nnSpy == null) {
nnSpy = DataNodeTestUtils.spyOnBposToNN(primaryDN, nn);
dnMap.put(primaryDN, nnSpy);
}
// Delay the commitBlockSynchronization call
DelayAnswer delayer = new DelayAnswer(LOG);
Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(
Mockito.eq(blk),
Mockito.anyInt(), // new genstamp
Mockito.anyLong(), // new length
Mockito.eq(true), // close file
Mockito.eq(false), // delete block
(DatanodeID[]) Mockito.anyObject(), // new targets
(String[]) Mockito.anyObject()); // new target storages
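        // DelayAnswer blocks the spied commitBlockSynchronization call until
        // proceed() is invoked, giving the test a deterministic window in
        // which to delete (and optionally recreate) the file's ancestors.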
fs.recoverLease(fPath);
LOG.info("Waiting for commitBlockSynchronization call from primary");
delayer.waitForCall();
LOG.info("Deleting recursively " + grandestNonRootParent);
fs.delete(grandestNonRootParent, true);
if (mkSameDir && !grandestNonRootParent.toString().equals(testPath)) {
LOG.info("Recreate dir " + grandestNonRootParent + " testpath: "
+ testPath);
fs.mkdirs(grandestNonRootParent);
}
delayer.proceed();
LOG.info("Now wait for result");
delayer.waitForResult();
Throwable t = delayer.getThrown();
if (t != null) {
LOG.info("Result exception (snapshot: " + hasSnapshot + "): " + t);
}
} // end of loop each fPath
LOG.info("Now check we can restart");
cluster.restartNameNodes();
LOG.info("Restart finished");
} finally {
if (stm != null) {
IOUtils.closeStream(stm);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test(timeout=600000)
public void testDeleteAndCommitBlockSynchonizationRaceNoSnapshot()
throws Exception {
testDeleteAndCommitBlockSynchronizationRace(false);
}
@Test(timeout=600000)
public void testDeleteAndCommitBlockSynchronizationRaceHasSnapshot()
throws Exception {
testDeleteAndCommitBlockSynchronizationRace(true);
}
}
| 12,971 | 35.133705 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.junit.Test;
public class TestFSImageStorageInspector {
/**
* Simple test with image, edits, and inprogress edits
*/
@Test
public void testCurrentStorageInspector() throws IOException {
FSImageTransactionalStorageInspector inspector =
new FSImageTransactionalStorageInspector();
StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
NameNodeDirType.IMAGE_AND_EDITS,
false,
"/foo/current/" + getImageFileName(123),
"/foo/current/" + getFinalizedEditsFileName(123, 456),
"/foo/current/" + getImageFileName(456),
"/foo/current/" + getInProgressEditsFileName(457));
inspector.inspectDirectory(mockDir);
assertEquals(2, inspector.foundImages.size());
FSImageFile latestImage = inspector.getLatestImages().get(0);
assertEquals(456, latestImage.txId);
assertSame(mockDir, latestImage.sd);
assertTrue(inspector.isUpgradeFinalized());
assertEquals(new File("/foo/current/"+getImageFileName(456)),
latestImage.getFile());
}
}
| 2,592 | 39.515625 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Collection;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.BlockMissingException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import org.slf4j.Logger;
/**
* This class tests the listCorruptFileBlocks API.
 * We create 3 files and intentionally delete their blocks, then use
 * listCorruptFileBlocks to validate that we get the list of corrupt
 * files/blocks; we also test the "paging" support by calling the API
 * with a cookie from a previous call and validating that the subsequent
 * blocks/files are also returned.
*/
public class TestListCorruptFileBlocks {
static final Logger LOG = NameNode.stateChangeLog;
/** check if nn.getCorruptFiles() returns a file that has corrupted blocks */
@Test (timeout=300000)
public void testListCorruptFilesCorruptedBlock() throws Exception {
MiniDFSCluster cluster = null;
Random random = new Random();
try {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans directories
conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports
// Set short retry timeouts so this test runs faster
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
// create two files with one block each
DFSTestUtil util = new DFSTestUtil.Builder().
setName("testCorruptFilesCorruptedBlock").setNumFiles(2).
setMaxLevels(1).setMaxSize(512).build();
util.createFiles(fs, "/srcdat10");
// fetch bad file list from namenode. There should be none.
final NameNode namenode = cluster.getNameNode();
Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.
getNamesystem().listCorruptFileBlocks("/", null);
assertTrue("Namenode has " + badFiles.size()
+ " corrupt files. Expecting None.", badFiles.size() == 0);
// Now deliberately corrupt one block
String bpid = cluster.getNamesystem().getBlockPoolId();
File storageDir = cluster.getInstanceStorageDir(0, 1);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
assertTrue("data directory does not exist", data_dir.exists());
List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
assertTrue("Data directory does not contain any blocks or there was an "
+ "IO error", metaFiles != null && !metaFiles.isEmpty());
File metaFile = metaFiles.get(0);
RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
FileChannel channel = file.getChannel();
long position = channel.size() - 2;
int length = 2;
byte[] buffer = new byte[length];
random.nextBytes(buffer);
channel.write(ByteBuffer.wrap(buffer), position);
file.close();
LOG.info("Deliberately corrupting file " + metaFile.getName() +
" at offset " + position + " length " + length);
// read all files to trigger detection of corrupted replica
try {
util.checkFiles(fs, "/srcdat10");
} catch (BlockMissingException e) {
System.out.println("Received BlockMissingException as expected.");
} catch (IOException e) {
assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " +
" but received IOException " + e, false);
}
// fetch bad file list from namenode. There should be one file.
badFiles = namenode.getNamesystem().listCorruptFileBlocks("/", null);
LOG.info("Namenode has bad files. " + badFiles.size());
assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.",
badFiles.size() == 1);
util.cleanup(fs, "/srcdat10");
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
/**
* Check that listCorruptFileBlocks works while the namenode is still in safemode.
*/
@Test (timeout=300000)
public void testListCorruptFileBlocksInSafeMode() throws Exception {
MiniDFSCluster cluster = null;
Random random = new Random();
try {
Configuration conf = new HdfsConfiguration();
// datanode scans directories
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
// datanode sends block reports
conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000);
// never leave safemode automatically
conf.setFloat(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,
1.5f);
// start populating repl queues immediately
conf.setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY,
0f);
// Set short retry timeouts so this test runs faster
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
cluster = new MiniDFSCluster.Builder(conf).waitSafeMode(false).build();
cluster.getNameNodeRpc().setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
FileSystem fs = cluster.getFileSystem();
// create two files with one block each
DFSTestUtil util = new DFSTestUtil.Builder().
setName("testListCorruptFileBlocksInSafeMode").setNumFiles(2).
setMaxLevels(1).setMaxSize(512).build();
util.createFiles(fs, "/srcdat10");
// fetch bad file list from namenode. There should be none.
Collection<FSNamesystem.CorruptFileBlockInfo> badFiles =
cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null);
assertTrue("Namenode has " + badFiles.size()
+ " corrupt files. Expecting None.", badFiles.size() == 0);
// Now deliberately corrupt one block
File storageDir = cluster.getInstanceStorageDir(0, 0);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir,
cluster.getNamesystem().getBlockPoolId());
assertTrue("data directory does not exist", data_dir.exists());
List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
assertTrue("Data directory does not contain any blocks or there was an "
+ "IO error", metaFiles != null && !metaFiles.isEmpty());
File metaFile = metaFiles.get(0);
RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
FileChannel channel = file.getChannel();
long position = channel.size() - 2;
int length = 2;
byte[] buffer = new byte[length];
random.nextBytes(buffer);
channel.write(ByteBuffer.wrap(buffer), position);
file.close();
LOG.info("Deliberately corrupting file " + metaFile.getName() +
" at offset " + position + " length " + length);
// read all files to trigger detection of corrupted replica
try {
util.checkFiles(fs, "/srcdat10");
} catch (BlockMissingException e) {
System.out.println("Received BlockMissingException as expected.");
} catch (IOException e) {
assertTrue("Corrupted replicas not handled properly. " +
"Expecting BlockMissingException " +
" but received IOException " + e, false);
}
// fetch bad file list from namenode. There should be one file.
badFiles = cluster.getNameNode().getNamesystem().
listCorruptFileBlocks("/", null);
LOG.info("Namenode has bad files. " + badFiles.size());
assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.",
badFiles.size() == 1);
// restart namenode
cluster.restartNameNode(0);
fs = cluster.getFileSystem();
// wait until replication queues have been initialized
while (!cluster.getNameNode().namesystem.isPopulatingReplQueues()) {
try {
LOG.info("waiting for replication queues");
Thread.sleep(1000);
} catch (InterruptedException ignore) {
}
}
// read all files to trigger detection of corrupted replica
try {
util.checkFiles(fs, "/srcdat10");
} catch (BlockMissingException e) {
System.out.println("Received BlockMissingException as expected.");
} catch (IOException e) {
assertTrue("Corrupted replicas not handled properly. " +
"Expecting BlockMissingException " +
" but received IOException " + e, false);
}
// fetch bad file list from namenode. There should be one file.
badFiles = cluster.getNameNode().getNamesystem().
listCorruptFileBlocks("/", null);
LOG.info("Namenode has bad files. " + badFiles.size());
assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.",
badFiles.size() == 1);
// check that we are still in safe mode
assertTrue("Namenode is not in safe mode",
cluster.getNameNode().isInSafeMode());
// now leave safe mode so that we can clean up
cluster.getNameNodeRpc().setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
util.cleanup(fs, "/srcdat10");
} catch (Exception e) {
LOG.error(StringUtils.stringifyException(e));
throw e;
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
// deliberately remove blocks from a file and validate the list-corrupt-file-blocks API
@Test (timeout=300000)
public void testlistCorruptFileBlocks() throws Exception {
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans
// directories
FileSystem fs = null;
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
fs = cluster.getFileSystem();
DFSTestUtil util = new DFSTestUtil.Builder().
setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
setMaxSize(1024).build();
util.createFiles(fs, "/corruptData");
final NameNode namenode = cluster.getNameNode();
Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks =
namenode.getNamesystem().listCorruptFileBlocks("/corruptData", null);
int numCorrupt = corruptFileBlocks.size();
assertTrue(numCorrupt == 0);
// delete the blocks
String bpid = cluster.getNamesystem().getBlockPoolId();
for (int i = 0; i < 4; i++) {
for (int j = 0; j <= 1; j++) {
File storageDir = cluster.getInstanceStorageDir(i, j);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
data_dir);
if (metadataFiles == null)
continue;
// assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
// (blocks.length > 0));
for (File metadataFile : metadataFiles) {
File blockFile = Block.metaToBlockFile(metadataFile);
LOG.info("Deliberately removing file " + blockFile.getName());
assertTrue("Cannot remove file.", blockFile.delete());
LOG.info("Deliberately removing file " + metadataFile.getName());
assertTrue("Cannot remove file.", metadataFile.delete());
// break;
}
}
}
int count = 0;
corruptFileBlocks = namenode.getNamesystem().
listCorruptFileBlocks("/corruptData", null);
numCorrupt = corruptFileBlocks.size();
while (numCorrupt < 3) {
Thread.sleep(1000);
corruptFileBlocks = namenode.getNamesystem()
.listCorruptFileBlocks("/corruptData", null);
numCorrupt = corruptFileBlocks.size();
count++;
if (count > 30)
break;
}
// Validate we get all the corrupt files
LOG.info("Namenode has bad files. " + numCorrupt);
assertTrue(numCorrupt == 3);
// test the paging here
FSNamesystem.CorruptFileBlockInfo[] cfb = corruptFileBlocks
.toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
// now get the 2nd and 3rd file that is corrupt
String[] cookie = new String[]{"1"};
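      // The cookie is a single-element array used as an in/out continuation
      // token: listCorruptFileBlocks resumes after the position it encodes and
      // rewrites it, so passing it back on the next call continues the listing.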
Collection<FSNamesystem.CorruptFileBlockInfo> nextCorruptFileBlocks =
namenode.getNamesystem()
.listCorruptFileBlocks("/corruptData", cookie);
FSNamesystem.CorruptFileBlockInfo[] ncfb = nextCorruptFileBlocks
.toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
numCorrupt = nextCorruptFileBlocks.size();
assertTrue(numCorrupt == 2);
assertTrue(ncfb[0].block.getBlockName()
.equalsIgnoreCase(cfb[1].block.getBlockName()));
corruptFileBlocks =
namenode.getNamesystem()
.listCorruptFileBlocks("/corruptData", cookie);
numCorrupt = corruptFileBlocks.size();
assertTrue(numCorrupt == 0);
// Do a listing on a dir which doesn't have any corrupt blocks and
// validate
util.createFiles(fs, "/goodData");
corruptFileBlocks =
namenode.getNamesystem().listCorruptFileBlocks("/goodData", null);
numCorrupt = corruptFileBlocks.size();
assertTrue(numCorrupt == 0);
util.cleanup(fs, "/corruptData");
util.cleanup(fs, "/goodData");
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
private int countPaths(RemoteIterator<Path> iter) throws IOException {
int i = 0;
while (iter.hasNext()) {
LOG.info("PATH: " + iter.next().toUri().getPath());
i++;
}
return i;
}
/**
* test listCorruptFileBlocks in DistributedFileSystem
*/
@Test (timeout=300000)
public void testlistCorruptFileBlocksDFS() throws Exception {
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans
// directories
FileSystem fs = null;
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
fs = cluster.getFileSystem();
DistributedFileSystem dfs = (DistributedFileSystem) fs;
DFSTestUtil util = new DFSTestUtil.Builder().
setName("testGetCorruptFiles").setNumFiles(3).
setMaxLevels(1).setMaxSize(1024).build();
util.createFiles(fs, "/corruptData");
RemoteIterator<Path> corruptFileBlocks =
dfs.listCorruptFileBlocks(new Path("/corruptData"));
int numCorrupt = countPaths(corruptFileBlocks);
assertTrue(numCorrupt == 0);
// delete the blocks
String bpid = cluster.getNamesystem().getBlockPoolId();
      // Loop through the number of data directories per datanode (2)
for (int i = 0; i < 2; i++) {
File storageDir = cluster.getInstanceStorageDir(0, i);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
data_dir);
if (metadataFiles == null)
continue;
// assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
// (blocks.length > 0));
for (File metadataFile : metadataFiles) {
File blockFile = Block.metaToBlockFile(metadataFile);
LOG.info("Deliberately removing file " + blockFile.getName());
assertTrue("Cannot remove file.", blockFile.delete());
LOG.info("Deliberately removing file " + metadataFile.getName());
assertTrue("Cannot remove file.", metadataFile.delete());
// break;
}
}
int count = 0;
corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
numCorrupt = countPaths(corruptFileBlocks);
while (numCorrupt < 3) {
Thread.sleep(1000);
corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
numCorrupt = countPaths(corruptFileBlocks);
count++;
if (count > 30)
break;
}
// Validate we get all the corrupt files
LOG.info("Namenode has bad files. " + numCorrupt);
assertTrue(numCorrupt == 3);
util.cleanup(fs, "/corruptData");
util.cleanup(fs, "/goodData");
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test if NN.listCorruptFiles() returns the right number of results.
* The corrupt blocks are detected by the BlockPoolSliceScanner.
* Also, test that DFS.listCorruptFileBlocks can make multiple successive
* calls.
*/
@Test (timeout=300000)
public void testMaxCorruptFiles() throws Exception {
MiniDFSCluster cluster = null;
try {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
final int maxCorruptFileBlocks =
FSNamesystem.DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED;
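      // The NameNode caps a single listCorruptFileBlocks response at this
      // constant, so with three times that many corrupt files the DFS-level
      // iterator used further below has to page through several RPC calls.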
      // create 3 * maxCorruptFileBlocks files with one block each
DFSTestUtil util = new DFSTestUtil.Builder().setName("testMaxCorruptFiles").
setNumFiles(maxCorruptFileBlocks * 3).setMaxLevels(1).setMaxSize(512).
build();
util.createFiles(fs, "/srcdat2", (short) 1);
util.waitReplication(fs, "/srcdat2", (short) 1);
// verify that there are no bad blocks.
final NameNode namenode = cluster.getNameNode();
Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.
getNamesystem().listCorruptFileBlocks("/srcdat2", null);
assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting none.",
badFiles.size() == 0);
      // Now deliberately remove blocks from all files
final String bpid = cluster.getNamesystem().getBlockPoolId();
for (int i=0; i<4; i++) {
for (int j=0; j<=1; j++) {
File storageDir = cluster.getInstanceStorageDir(i, j);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
LOG.info("Removing files from " + data_dir);
List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
data_dir);
if (metadataFiles == null)
continue;
for (File metadataFile : metadataFiles) {
File blockFile = Block.metaToBlockFile(metadataFile);
assertTrue("Cannot remove file.", blockFile.delete());
assertTrue("Cannot remove file.", metadataFile.delete());
}
}
}
      // Run the directory scanner to update the DataNode's volumeMap
DataNode dn = cluster.getDataNodes().get(0);
DataNodeTestUtils.runDirectoryScanner(dn);
// Occasionally the BlockPoolSliceScanner can run before we have removed
// the blocks. Restart the Datanode to trigger the scanner into running
// once more.
LOG.info("Restarting Datanode to trigger BlockPoolSliceScanner");
cluster.restartDataNodes();
cluster.waitActive();
badFiles =
namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
while (badFiles.size() < maxCorruptFileBlocks) {
LOG.info("# of corrupt files is: " + badFiles.size());
Thread.sleep(10000);
badFiles = namenode.getNamesystem().
listCorruptFileBlocks("/srcdat2", null);
}
badFiles = namenode.getNamesystem().
listCorruptFileBlocks("/srcdat2", null);
LOG.info("Namenode has bad files. " + badFiles.size());
assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting " +
maxCorruptFileBlocks + ".",
badFiles.size() == maxCorruptFileBlocks);
CorruptFileBlockIterator iter = (CorruptFileBlockIterator)
fs.listCorruptFileBlocks(new Path("/srcdat2"));
int corruptPaths = countPaths(iter);
assertTrue("Expected more than " + maxCorruptFileBlocks +
" corrupt file blocks but got " + corruptPaths,
corruptPaths > maxCorruptFileBlocks);
assertTrue("Iterator should have made more than 1 call but made " +
iter.getCallsMade(),
iter.getCallsMade() > 1);
util.cleanup(fs, "/srcdat2");
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
}
| 22,462 | 41.144465 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import javax.servlet.ServletContext;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.junit.Test;
import org.mockito.ArgumentMatcher;
import org.mockito.Mockito;
public class TestGetImageServlet {
@Test
public void testIsValidRequestor() throws IOException {
Configuration conf = new HdfsConfiguration();
KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");
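    // These auth_to_local rules map one- and two-component principals to their
    // first component, so the requestor checks below compare short names such
    // as "hdfs" and "atm". The realm and non-admin user literals used below
    // are representative test values.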
// Set up generic HA configs.
conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
"ns1"), "nn1,nn2");
// Set up NN1 HA configs.
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
"ns1", "nn1"), "host1:1234");
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,
"ns1", "nn1"), "hdfs/[email protected]");
// Set up NN2 HA configs.
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
"ns1", "nn2"), "host2:1234");
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,
"ns1", "nn2"), "hdfs/[email protected]");
// Initialize this conf object as though we're running on NN1.
NameNode.initializeGenericKeys(conf, "ns1", "nn1");
AccessControlList acls = Mockito.mock(AccessControlList.class);
Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
ServletContext context = Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
// Make sure that NN2 is considered a valid fsimage/edits requestor.
assertTrue(ImageServlet.isValidRequestor(context,
"hdfs/[email protected]", conf));
// Mark atm as an admin.
Mockito.when(acls.isUserAllowed(Mockito.argThat(new ArgumentMatcher<UserGroupInformation>() {
@Override
public boolean matches(Object argument) {
return ((UserGroupInformation) argument).getShortUserName().equals("atm");
}
}))).thenReturn(true);
// Make sure that NN2 is still considered a valid requestor.
assertTrue(ImageServlet.isValidRequestor(context,
"hdfs/[email protected]", conf));
// Make sure an admin is considered a valid requestor.
assertTrue(ImageServlet.isValidRequestor(context,
"[email protected]", conf));
// Make sure other users are *not* considered valid requestors.
assertFalse(ImageServlet.isValidRequestor(context,
"[email protected]", conf));
}
}
| 3,930 | 39.947917 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.junit.Test;
public class TestGenericJournalConf {
private static final String DUMMY_URI = "dummy://test";
/**
* Test that an exception is thrown if a journal class doesn't exist
* in the configuration
*/
@Test(expected=IllegalArgumentException.class)
public void testNotConfigured() throws Exception {
MiniDFSCluster cluster = null;
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
"dummy://test");
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test that an exception is thrown if a journal class doesn't
* exist in the classloader.
*/
@Test(expected=IllegalArgumentException.class)
public void testClassDoesntExist() throws Exception {
MiniDFSCluster cluster = null;
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
"org.apache.hadoop.nonexistent");
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
"dummy://test");
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test that an implementation of JournalManager without a
* (Configuration, URI, NamespaceInfo) constructor throws an exception
*/
@Test
public void testBadConstructor() throws Exception {
MiniDFSCluster cluster = null;
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
BadConstructorJournalManager.class.getName());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
"dummy://test");
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
fail("Should have failed before this point");
} catch (IllegalArgumentException iae) {
if (!iae.getMessage().contains("Unable to construct journal")) {
fail("Should have failed with unable to construct exception");
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test that a dummy implementation of JournalManager can
* be initialized on startup
*/
@Test
public void testDummyJournalManager() throws Exception {
MiniDFSCluster cluster = null;
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
DummyJournalManager.class.getName());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, DUMMY_URI);
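// Edits go only to the dummy journal, so there is no local edits volume to check.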
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY, 0);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
assertTrue(DummyJournalManager.shouldPromptCalled);
assertTrue(DummyJournalManager.formatCalled);
assertNotNull(DummyJournalManager.conf);
assertEquals(new URI(DUMMY_URI), DummyJournalManager.uri);
assertNotNull(DummyJournalManager.nsInfo);
assertEquals(DummyJournalManager.nsInfo.getClusterID(),
cluster.getNameNode().getNamesystem().getClusterId());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
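/**
* No-op JournalManager that records its constructor arguments and which
* lifecycle methods were invoked, so the tests above can verify that the
* pluggable journal mechanism wires everything through.
*/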
public static class DummyJournalManager implements JournalManager {
static Configuration conf = null;
static URI uri = null;
static NamespaceInfo nsInfo = null;
static boolean formatCalled = false;
static boolean shouldPromptCalled = false;
public DummyJournalManager(Configuration conf, URI u,
NamespaceInfo nsInfo) {
// Set static vars so the test case can verify them.
DummyJournalManager.conf = conf;
DummyJournalManager.uri = u;
DummyJournalManager.nsInfo = nsInfo;
}
@Override
public void format(NamespaceInfo nsInfo) throws IOException {
formatCalled = true;
}
@Override
public EditLogOutputStream startLogSegment(long txId, int layoutVersion)
throws IOException {
return mock(EditLogOutputStream.class);
}
@Override
public void finalizeLogSegment(long firstTxId, long lastTxId)
throws IOException {
// noop
}
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
long fromTxnId, boolean inProgressOk) {
}
@Override
public void setOutputBufferCapacity(int size) {}
@Override
public void purgeLogsOlderThan(long minTxIdToKeep)
throws IOException {}
@Override
public void recoverUnfinalizedSegments() throws IOException {}
@Override
public void close() throws IOException {}
@Override
public boolean hasSomeData() throws IOException {
shouldPromptCalled = true;
return false;
}
@Override
public void doPreUpgrade() throws IOException {}
@Override
public void doUpgrade(Storage storage) throws IOException {}
@Override
public void doFinalize() throws IOException {}
@Override
public boolean canRollBack(StorageInfo storage, StorageInfo prevStorage, int targetLayoutVersion)
throws IOException {
return false;
}
@Override
public void doRollback() throws IOException {}
@Override
public void discardSegments(long startTxId) throws IOException {}
@Override
public long getJournalCTime() throws IOException {
return -1;
}
}
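/**
* Variant of DummyJournalManager that lacks the (Configuration, URI,
* NamespaceInfo) constructor required to instantiate a journal plugin.
*/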
public static class BadConstructorJournalManager extends DummyJournalManager {
public BadConstructorJournalManager() {
super(null, null, null);
}
@Override
public void doPreUpgrade() throws IOException {}
@Override
public void doUpgrade(Storage storage) throws IOException {}
@Override
public void doFinalize() throws IOException {}
@Override
public boolean canRollBack(StorageInfo storage, StorageInfo prevStorage, int targetLayoutVersion)
throws IOException {
return false;
}
@Override
public void doRollback() throws IOException {}
@Override
public long getJournalCTime() throws IOException {
return -1;
}
}
}
| 7,837 | 29.498054 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Collections;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
public class TestBackupNode {
public static final Log LOG = LogFactory.getLog(TestBackupNode.class);
static {
((Log4JLogger)Checkpointer.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)BackupImage.LOG).getLogger().setLevel(Level.ALL);
}
static final String BASE_DIR = MiniDFSCluster.getBaseDirectory();
@Before
public void setUp() throws Exception {
File baseDir = new File(BASE_DIR);
if(baseDir.exists())
if(!(FileUtil.fullyDelete(baseDir)))
throw new IOException("Cannot remove directory: " + baseDir);
File dirC = new File(getBackupNodeDir(StartupOption.CHECKPOINT, 1));
dirC.mkdirs();
File dirB = new File(getBackupNodeDir(StartupOption.BACKUP, 1));
dirB.mkdirs();
dirB = new File(getBackupNodeDir(StartupOption.BACKUP, 2));
dirB.mkdirs();
}
static String getBackupNodeDir(StartupOption t, int idx) {
return BASE_DIR + "name" + t.getName() + idx + "/";
}
BackupNode startBackupNode(Configuration conf,
StartupOption startupOpt,
int idx) throws IOException {
Configuration c = new HdfsConfiguration(conf);
String dirs = getBackupNodeDir(startupOpt, idx);
c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);
c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
"${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
"127.0.0.1:0");
c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
"127.0.0.1:0");
BackupNode bn = (BackupNode)NameNode.createNameNode(
new String[]{startupOpt.getName()}, c);
assertTrue(bn.getRole() + " must be in SafeMode.", bn.isInSafeMode());
assertTrue(bn.getRole() + " must be in StandbyState",
bn.getNamesystem().getHAState()
.equalsIgnoreCase(HAServiceState.STANDBY.name()));
return bn;
}
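/**
* Block until the NameNode's most recent checkpoint txid reaches the given
* txid, then verify that the checkpoint was uploaded to the NameNode.
*/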
void waitCheckpointDone(MiniDFSCluster cluster, long txid) {
long thisCheckpointTxId;
do {
try {
LOG.info("Waiting checkpoint to complete... " +
"checkpoint txid should increase above " + txid);
Thread.sleep(1000);
} catch (Exception e) {}
// The checkpoint is not done until the nn has received it from the bn
thisCheckpointTxId = cluster.getNameNode().getFSImage().getStorage()
.getMostRecentCheckpointTxId();
} while (thisCheckpointTxId < txid);
// Check that the checkpoint got uploaded to NN successfully
FSImageTestUtil.assertNNHasCheckpoints(cluster,
Collections.singletonList((int)thisCheckpointTxId));
}
@Test
public void testCheckpointNode() throws Exception {
testCheckpoint(StartupOption.CHECKPOINT);
}
/**
* Ensure that the backupnode will tail edits from the NN
* and keep in sync, even while the NN rolls, checkpoints
* occur, etc.
*/
@Test
public void testBackupNodeTailsEdits() throws Exception {
Configuration conf = new HdfsConfiguration();
HAUtil.setAllowStandbyReads(conf, true);
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
BackupNode backup = null;
try {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0).build();
fileSys = cluster.getFileSystem();
backup = startBackupNode(conf, StartupOption.BACKUP, 1);
BackupImage bnImage = (BackupImage) backup.getFSImage();
testBNInSync(cluster, backup, 1);
// Force a roll -- BN should roll with NN.
NameNode nn = cluster.getNameNode();
NamenodeProtocols nnRpc = nn.getRpcServer();
nnRpc.rollEditLog();
assertEquals(bnImage.getEditLog().getCurSegmentTxId(),
nn.getFSImage().getEditLog().getCurSegmentTxId());
// BN should stay in sync after roll
testBNInSync(cluster, backup, 2);
long nnImageBefore =
nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
// BN checkpoint
backup.doCheckpoint();
// NN should have received a new image
long nnImageAfter =
nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
assertTrue("nn should have received new checkpoint. before: " +
nnImageBefore + " after: " + nnImageAfter,
nnImageAfter > nnImageBefore);
// BN should stay in sync after checkpoint
testBNInSync(cluster, backup, 3);
// Stop BN
StorageDirectory sd = bnImage.getStorage().getStorageDir(0);
backup.stop();
backup = null;
// When shutting down the BN, it shouldn't finalize logs that are
// still open on the NN
EditLogFile editsLog = FSImageTestUtil.findLatestEditsLog(sd);
assertEquals(editsLog.getFirstTxId(),
nn.getFSImage().getEditLog().getCurSegmentTxId());
assertTrue("Should not have finalized " + editsLog,
editsLog.isInProgress());
// do some edits
assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down")));
// start a new backup node
backup = startBackupNode(conf, StartupOption.BACKUP, 1);
testBNInSync(cluster, backup, 4);
assertNotNull(backup.getNamesystem().getFileInfo("/edit-while-bn-down", false));
} finally {
LOG.info("Shutting down...");
if (backup != null) backup.stop();
if (fileSys != null) fileSys.close();
if (cluster != null) cluster.shutdown();
}
assertStorageDirsMatch(cluster.getNameNode(), backup);
}
private void testBNInSync(MiniDFSCluster cluster, final BackupNode backup,
int testIdx) throws Exception {
final NameNode nn = cluster.getNameNode();
final FileSystem fs = cluster.getFileSystem();
// Do a bunch of namespace operations, make sure they're replicated
// to the BN.
for (int i = 0; i < 10; i++) {
final String src = "/test_" + testIdx + "_" + i;
LOG.info("Creating " + src + " on NN");
Path p = new Path(src);
assertTrue(fs.mkdirs(p));
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
LOG.info("Checking for " + src + " on BN");
try {
boolean hasFile = backup.getNamesystem().getFileInfo(src, false) != null;
boolean txnIdMatch =
backup.getRpcServer().getTransactionID() ==
nn.getRpcServer().getTransactionID();
return hasFile && txnIdMatch;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}, 30, 10000);
}
assertStorageDirsMatch(nn, backup);
}
private void assertStorageDirsMatch(final NameNode nn, final BackupNode backup)
throws Exception {
// Check that the stored files in the name dirs are identical
List<File> dirs = Lists.newArrayList(
FSImageTestUtil.getCurrentDirs(nn.getFSImage().getStorage(),
null));
dirs.addAll(FSImageTestUtil.getCurrentDirs(backup.getFSImage().getStorage(),
null));
FSImageTestUtil.assertParallelFilesAreIdentical(dirs, ImmutableSet.of("VERSION"));
}
@Test
public void testBackupNode() throws Exception {
testCheckpoint(StartupOption.BACKUP);
}
void testCheckpoint(StartupOption op) throws Exception {
Path file1 = new Path("/checkpoint.dat");
Path file2 = new Path("/checkpoint2.dat");
Path file3 = new Path("/backup.dat");
Configuration conf = new HdfsConfiguration();
HAUtil.setAllowStandbyReads(conf, true);
short replication = (short)conf.getInt("dfs.replication", 3);
int numDatanodes = Math.max(3, replication);
conf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "localhost:0");
conf.set(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, "0");
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // disable block scanner
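// checkpoint after every transaction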
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
BackupNode backup = null;
try {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0).build();
fileSys = cluster.getFileSystem();
//
// verify that 'format' really blew away all pre-existing files
//
assertTrue(!fileSys.exists(file1));
assertTrue(!fileSys.exists(file2));
//
// Create file1
//
assertTrue(fileSys.mkdirs(file1));
//
// Take a checkpoint
//
long txid = cluster.getNameNodeRpc().getTransactionID();
backup = startBackupNode(conf, op, 1);
waitCheckpointDone(cluster, txid);
} catch(IOException e) {
LOG.error("Error in TestBackupNode:", e);
assertTrue(e.getLocalizedMessage(), false);
} finally {
if(backup != null) backup.stop();
if(fileSys != null) fileSys.close();
if(cluster != null) cluster.shutdown();
}
File nnCurDir = new File(BASE_DIR, "name1/current/");
File bnCurDir = new File(getBackupNodeDir(op, 1), "/current/");
FSImageTestUtil.assertParallelFilesAreIdentical(
ImmutableList.of(bnCurDir, nnCurDir),
ImmutableSet.<String>of("VERSION"));
try {
//
// Restart cluster and verify that file1 still exist.
//
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.format(false).build();
fileSys = cluster.getFileSystem();
// check that file1 still exists
assertTrue(fileSys.exists(file1));
fileSys.delete(file1, true);
// create new file file2
fileSys.mkdirs(file2);
//
// Take a checkpoint
//
long txid = cluster.getNameNodeRpc().getTransactionID();
backup = startBackupNode(conf, op, 1);
waitCheckpointDone(cluster, txid);
for (int i = 0; i < 10; i++) {
fileSys.mkdirs(new Path("file_" + i));
}
txid = cluster.getNameNodeRpc().getTransactionID();
backup.doCheckpoint();
waitCheckpointDone(cluster, txid);
txid = cluster.getNameNodeRpc().getTransactionID();
backup.doCheckpoint();
waitCheckpointDone(cluster, txid);
// Try BackupNode operations
InetSocketAddress add = backup.getNameNodeAddress();
// Write to BN
FileSystem bnFS = FileSystem.get(new Path("hdfs://"
+ NetUtils.getHostPortString(add)).toUri(), conf);
boolean canWrite = true;
try {
TestCheckpoint.writeFile(bnFS, file3, replication);
} catch (IOException eio) {
LOG.info("Write to " + backup.getRole() + " failed as expected: ", eio);
canWrite = false;
}
assertFalse("Write to BackupNode must be prohibited.", canWrite);
// Reads are allowed for BackupNode, but not for CheckpointNode
boolean canRead = true;
try {
bnFS.exists(file2);
} catch (IOException eio) {
LOG.info("Read from " + backup.getRole() + " failed: ", eio);
canRead = false;
}
assertEquals("Reads to BackupNode are allowed, but not CheckpointNode.",
canRead, backup.isRole(NamenodeRole.BACKUP));
TestCheckpoint.writeFile(fileSys, file3, replication);
TestCheckpoint.checkFile(fileSys, file3, replication);
// should also be on BN right away
assertTrue("file3 does not exist on BackupNode",
op != StartupOption.BACKUP ||
backup.getNamesystem().getFileInfo(
file3.toUri().getPath(), false) != null);
} catch(IOException e) {
LOG.error("Error in TestBackupNode:", e);
throw new AssertionError(e);
} finally {
if(backup != null) backup.stop();
if(fileSys != null) fileSys.close();
if(cluster != null) cluster.shutdown();
}
FSImageTestUtil.assertParallelFilesAreIdentical(
ImmutableList.of(bnCurDir, nnCurDir),
ImmutableSet.<String>of("VERSION"));
try {
//
// Restart cluster and verify that file2 exists and
// file1 does not exist.
//
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
fileSys = cluster.getFileSystem();
assertTrue(!fileSys.exists(file1));
// verify that file2 exists
assertTrue(fileSys.exists(file2));
} catch(IOException e) {
LOG.error("Error in TestBackupNode: ", e);
assertTrue(e.getLocalizedMessage(), false);
} finally {
fileSys.close();
cluster.shutdown();
}
}
/**
* Verify that a file can be read both from NameNode and BackupNode.
*/
@Test
public void testCanReadData() throws IOException {
Path file1 = new Path("/fileToRead.dat");
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
BackupNode backup = null;
try {
// Start NameNode and BackupNode
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0).format(true).build();
fileSys = cluster.getFileSystem();
long txid = cluster.getNameNodeRpc().getTransactionID();
backup = startBackupNode(conf, StartupOption.BACKUP, 1);
waitCheckpointDone(cluster, txid);
// Setup dual NameNode configuration for DataNodes
String rpcAddrKeyPreffix =
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".bnCluster";
String nnAddr = cluster.getNameNode().getNameNodeAddressHostPortString();
conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
String bnAddr = backup.getNameNodeAddressHostPortString();
conf.set(DFSConfigKeys.DFS_NAMESERVICES, "bnCluster");
conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "bnCluster");
conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + ".bnCluster",
"nnActive, nnBackup");
conf.set(rpcAddrKeyPreffix + ".nnActive", nnAddr);
conf.set(rpcAddrKeyPreffix + ".nnBackup", bnAddr);
cluster.startDataNodes(conf, 3, true, StartupOption.REGULAR, null);
DFSTestUtil.createFile(
fileSys, file1, 8192, (short)3, 0);
// Read the same file from file systems pointing to NN and BN
FileSystem bnFS = FileSystem.get(
new Path("hdfs://" + bnAddr).toUri(), conf);
String nnData = DFSTestUtil.readFile(fileSys, file1);
String bnData = DFSTestUtil.readFile(bnFS, file1);
assertEquals("Data read from BackupNode and NameNode is not the same.",
nnData, bnData);
} catch(IOException e) {
LOG.error("Error in TestBackupNode: ", e);
assertTrue(e.getLocalizedMessage(), false);
} finally {
if(fileSys != null) fileSys.close();
if(backup != null) backup.stop();
if(cluster != null) cluster.shutdown();
}
}
}
| 17,278 | 36 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCreateEditsLog.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.fail;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
import java.io.File;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.Util;
/**
* Tests the CreateEditsLog utility.
*/
public class TestCreateEditsLog {
private static final File HDFS_DIR = new File(
MiniDFSCluster.getBaseDirectory()).getAbsoluteFile();
private static final File TEST_DIR = new File(
System.getProperty("test.build.data", "build/test/data"),
"TestCreateEditsLog").getAbsoluteFile();
private MiniDFSCluster cluster;
@Before
public void setUp() throws Exception {
deleteIfExists(HDFS_DIR);
deleteIfExists(TEST_DIR);
}
@After
public void tearDown() {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
deleteIfExists(HDFS_DIR);
deleteIfExists(TEST_DIR);
}
/**
* Tests that an edits log created using CreateEditsLog is valid and can be
* loaded successfully by a namenode.
*/
@Test(timeout=60000)
public void testCanLoadCreatedEditsLog() throws Exception {
// Format namenode.
HdfsConfiguration conf = new HdfsConfiguration();
File nameDir = new File(HDFS_DIR, "name");
conf.set(DFS_NAMENODE_NAME_DIR_KEY, Util.fileAsURI(nameDir).toString());
DFSTestUtil.formatNameNode(conf);
// Call CreateEditsLog and move the resulting edits to the name dir.
CreateEditsLog.main(new String[] { "-f", "1000", "0", "1", "-d",
TEST_DIR.getAbsolutePath() });
Path editsWildcard = new Path(TEST_DIR.getAbsolutePath(), "*");
FileContext localFc = FileContext.getLocalFSFileContext();
for (FileStatus edits: localFc.util().globStatus(editsWildcard)) {
Path src = edits.getPath();
Path dst = new Path(new File(nameDir, "current").getAbsolutePath(),
src.getName());
localFc.rename(src, dst);
}
// Start a namenode to try to load the edits.
cluster = new MiniDFSCluster.Builder(conf)
.format(false)
.manageNameDfsDirs(false)
.waitSafeMode(false)
.build();
cluster.waitClusterUp();
// Test successful, because no exception thrown.
}
/**
* Fully delete the given directory if it exists.
*
* @param file File to delete
*/
private static void deleteIfExists(File file) {
if (file.exists() && !FileUtil.fullyDelete(file)) {
fail("Could not delete '" + file + "'");
}
}
}
| 3,643 | 31.247788 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringReader;
import java.util.EnumSet;
import java.util.List;
import java.util.Random;
import com.google.common.collect.ImmutableList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.google.common.collect.Lists;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* Test {@link FSDirectory}, the in-memory namespace tree.
*/
public class TestFSDirectory {
public static final Log LOG = LogFactory.getLog(TestFSDirectory.class);
private static final long seed = 0;
private static final short REPLICATION = 3;
private final Path dir = new Path("/" + getClass().getSimpleName());
private final Path sub1 = new Path(dir, "sub1");
private final Path file1 = new Path(sub1, "file1");
private final Path file2 = new Path(sub1, "file2");
private final Path sub11 = new Path(sub1, "sub11");
private final Path file3 = new Path(sub11, "file3");
private final Path file5 = new Path(sub1, "z_file5");
private final Path sub2 = new Path(dir, "sub2");
private final Path file6 = new Path(sub2, "file6");
private Configuration conf;
private MiniDFSCluster cluster;
private FSNamesystem fsn;
private FSDirectory fsdir;
private DistributedFileSystem hdfs;
private static final int numGeneratedXAttrs = 256;
private static final ImmutableList<XAttr> generatedXAttrs =
ImmutableList.copyOf(generateXAttrs(numGeneratedXAttrs));
@Before
public void setUp() throws Exception {
conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2);
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPLICATION)
.build();
cluster.waitActive();
fsn = cluster.getNamesystem();
fsdir = fsn.getFSDirectory();
hdfs = cluster.getFileSystem();
DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file3, 1024, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file5, 1024, REPLICATION, seed);
hdfs.mkdirs(sub2);
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
/** Dump the tree and verify that every dumped entry line is well formed. */
@Test
public void testDumpTree() throws Exception {
final INode root = fsdir.getINode("/");
LOG.info("Original tree");
final StringBuffer b1 = root.dumpTreeRecursively();
System.out.println("b1=" + b1);
final BufferedReader in = new BufferedReader(new StringReader(b1.toString()));
String line = in.readLine();
checkClassName(line);
for(; (line = in.readLine()) != null; ) {
line = line.trim();
if (!line.isEmpty() && !line.contains("snapshot")) {
assertTrue("line=" + line,
line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
|| line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM)
);
checkClassName(line);
}
}
}
@Test
public void testSkipQuotaCheck() throws Exception {
try {
// set quota. nsQuota of 1 means no files can be created
// under this directory.
hdfs.setQuota(sub2, 1, Long.MAX_VALUE);
// create a file
try {
// this should fail
DFSTestUtil.createFile(hdfs, file6, 1024, REPLICATION, seed);
throw new IOException("The create should have failed.");
} catch (NSQuotaExceededException qe) {
// ignored
}
// disable the quota check and retry. this should succeed.
fsdir.disableQuotaChecks();
DFSTestUtil.createFile(hdfs, file6, 1024, REPLICATION, seed);
// trying again after re-enabling the check.
hdfs.delete(file6, false); // cleanup
fsdir.enableQuotaChecks();
try {
// this should fail
DFSTestUtil.createFile(hdfs, file6, 1024, REPLICATION, seed);
throw new IOException("The create should have failed.");
} catch (NSQuotaExceededException qe) {
// ignored
}
} finally {
hdfs.delete(file6, false); // cleanup, in case the test failed in the middle.
hdfs.setQuota(sub2, Long.MAX_VALUE, Long.MAX_VALUE);
}
}
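/** Extract the class name between the last '(' and '@' of a dumped line and assert it is an INodeFile or INodeDirectory. */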
static void checkClassName(String line) {
int i = line.lastIndexOf('(');
int j = line.lastIndexOf('@');
final String classname = line.substring(i+1, j);
assertTrue(classname.startsWith(INodeFile.class.getSimpleName())
|| classname.startsWith(INodeDirectory.class.getSimpleName()));
}
@Test
public void testINodeXAttrsLimit() throws Exception {
List<XAttr> existingXAttrs = Lists.newArrayListWithCapacity(2);
XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
setName("a1").setValue(new byte[]{0x31, 0x32, 0x33}).build();
XAttr xAttr2 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
setName("a2").setValue(new byte[]{0x31, 0x31, 0x31}).build();
existingXAttrs.add(xAttr1);
existingXAttrs.add(xAttr2);
// Adding system and raw namespace xAttrs isn't affected by the inode
// xAttr limit.
XAttr newSystemXAttr = (new XAttr.Builder()).
setNameSpace(XAttr.NameSpace.SYSTEM).setName("a3").
setValue(new byte[]{0x33, 0x33, 0x33}).build();
XAttr newRawXAttr = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.RAW).
setName("a3").setValue(new byte[]{0x33, 0x33, 0x33}).build();
List<XAttr> newXAttrs = Lists.newArrayListWithCapacity(2);
newXAttrs.add(newSystemXAttr);
newXAttrs.add(newRawXAttr);
List<XAttr> xAttrs = FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs,
newXAttrs, EnumSet.of(
XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
assertEquals(xAttrs.size(), 4);
// Adding a trusted namespace xAttr is affected by the inode xAttr limit.
XAttr newXAttr1 = (new XAttr.Builder()).setNameSpace(
XAttr.NameSpace.TRUSTED).setName("a4").
setValue(new byte[]{0x34, 0x34, 0x34}).build();
newXAttrs.set(0, newXAttr1);
try {
FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, newXAttrs,
EnumSet.of(XAttrSetFlag.CREATE,
XAttrSetFlag.REPLACE));
fail("Setting user visible xattr on inode should fail if " +
"reaching limit.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Cannot add additional XAttr " +
"to inode, would exceed limit", e);
}
}
/**
* Verify that the first <i>num</i> generatedXAttrs are present in
* newXAttrs.
*/
private static void verifyXAttrsPresent(List<XAttr> newXAttrs,
final int num) {
assertEquals("Unexpected number of XAttrs after multiset", num,
newXAttrs.size());
for (int i=0; i<num; i++) {
XAttr search = generatedXAttrs.get(i);
assertTrue("Did not find set XAttr " + search + " + after multiset",
newXAttrs.contains(search));
}
}
private static List<XAttr> generateXAttrs(final int numXAttrs) {
List<XAttr> generatedXAttrs = Lists.newArrayListWithCapacity(numXAttrs);
for (int i=0; i<numXAttrs; i++) {
XAttr xAttr = (new XAttr.Builder())
.setNameSpace(XAttr.NameSpace.SYSTEM)
.setName("a" + i)
.setValue(new byte[] { (byte) i, (byte) (i + 1), (byte) (i + 2) })
.build();
generatedXAttrs.add(xAttr);
}
return generatedXAttrs;
}
/**
* Test setting and removing multiple xattrs via single operations
*/
@Test(timeout=300000)
public void testXAttrMultiSetRemove() throws Exception {
List<XAttr> existingXAttrs = Lists.newArrayListWithCapacity(0);
// Keep adding a random number of xattrs and verifying until exhausted
final Random rand = new Random(0xFEEDA);
int numExpectedXAttrs = 0;
while (numExpectedXAttrs < numGeneratedXAttrs) {
LOG.info("Currently have " + numExpectedXAttrs + " xattrs");
final int numToAdd = rand.nextInt(5)+1;
List<XAttr> toAdd = Lists.newArrayListWithCapacity(numToAdd);
for (int i = 0; i < numToAdd; i++) {
if (numExpectedXAttrs >= numGeneratedXAttrs) {
break;
}
toAdd.add(generatedXAttrs.get(numExpectedXAttrs));
numExpectedXAttrs++;
}
LOG.info("Attempting to add " + toAdd.size() + " XAttrs");
for (int i = 0; i < toAdd.size(); i++) {
LOG.info("Will add XAttr " + toAdd.get(i));
}
List<XAttr> newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs,
toAdd, EnumSet.of(
XAttrSetFlag.CREATE));
verifyXAttrsPresent(newXAttrs, numExpectedXAttrs);
existingXAttrs = newXAttrs;
}
// Keep removing a random number of xattrs and verifying until all gone
while (numExpectedXAttrs > 0) {
LOG.info("Currently have " + numExpectedXAttrs + " xattrs");
final int numToRemove = rand.nextInt(5)+1;
List<XAttr> toRemove = Lists.newArrayListWithCapacity(numToRemove);
for (int i = 0; i < numToRemove; i++) {
if (numExpectedXAttrs == 0) {
break;
}
toRemove.add(generatedXAttrs.get(numExpectedXAttrs-1));
numExpectedXAttrs--;
}
final int expectedNumToRemove = toRemove.size();
LOG.info("Attempting to remove " + expectedNumToRemove + " XAttrs");
List<XAttr> removedXAttrs = Lists.newArrayList();
List<XAttr> newXAttrs = FSDirXAttrOp.filterINodeXAttrs(existingXAttrs,
toRemove,
removedXAttrs);
assertEquals("Unexpected number of removed XAttrs",
expectedNumToRemove, removedXAttrs.size());
verifyXAttrsPresent(newXAttrs, numExpectedXAttrs);
existingXAttrs = newXAttrs;
}
}
@Test(timeout=300000)
public void testXAttrMultiAddRemoveErrors() throws Exception {
// Test that the same XAttr can not be multiset twice
List<XAttr> existingXAttrs = Lists.newArrayList();
List<XAttr> toAdd = Lists.newArrayList();
toAdd.add(generatedXAttrs.get(0));
toAdd.add(generatedXAttrs.get(1));
toAdd.add(generatedXAttrs.get(2));
toAdd.add(generatedXAttrs.get(0));
try {
FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, toAdd,
EnumSet.of(XAttrSetFlag.CREATE));
fail("Specified the same xattr to be set twice");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Cannot specify the same " +
"XAttr to be set", e);
}
// Test that CREATE and REPLACE flags are obeyed
toAdd.remove(generatedXAttrs.get(0));
existingXAttrs.add(generatedXAttrs.get(0));
try {
FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, toAdd,
EnumSet.of(XAttrSetFlag.CREATE));
fail("Set XAttr that is already set without REPLACE flag");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("already exists", e);
}
try {
FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, toAdd,
EnumSet.of(XAttrSetFlag.REPLACE));
fail("Set XAttr that does not exist without the CREATE flag");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("does not exist", e);
}
// Sanity test for CREATE
toAdd.remove(generatedXAttrs.get(0));
List<XAttr> newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs,
toAdd, EnumSet.of(
XAttrSetFlag.CREATE));
assertEquals("Unexpected toAdd size", 2, toAdd.size());
for (XAttr x : toAdd) {
assertTrue("Did not find added XAttr " + x, newXAttrs.contains(x));
}
existingXAttrs = newXAttrs;
// Sanity test for REPLACE
toAdd = Lists.newArrayList();
for (int i=0; i<3; i++) {
XAttr xAttr = (new XAttr.Builder())
.setNameSpace(XAttr.NameSpace.SYSTEM)
.setName("a" + i)
.setValue(new byte[] { (byte) (i*2) })
.build();
toAdd.add(xAttr);
}
newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, toAdd,
EnumSet.of(XAttrSetFlag.REPLACE));
assertEquals("Unexpected number of new XAttrs", 3, newXAttrs.size());
for (int i=0; i<3; i++) {
assertArrayEquals("Unexpected XAttr value",
new byte[] {(byte)(i*2)}, newXAttrs.get(i).getValue());
}
existingXAttrs = newXAttrs;
// Sanity test for CREATE+REPLACE
toAdd = Lists.newArrayList();
for (int i=0; i<4; i++) {
toAdd.add(generatedXAttrs.get(i));
}
newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsdir, existingXAttrs, toAdd,
EnumSet.of(XAttrSetFlag.CREATE,
XAttrSetFlag.REPLACE));
verifyXAttrsPresent(newXAttrs, 4);
}
}
| 14,699 | 36.692308 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrConfigFlag.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
/**
* Tests that when the configuration flag that controls XAttr support is off,
* all attempted XAttr operations fail. The NameNode can still load XAttrs
* from fsimage or edits.
*/
public class TestXAttrConfigFlag {
private static final Path PATH = new Path("/path");
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
@Rule
public ExpectedException exception = ExpectedException.none();
@After
public void shutdown() throws Exception {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testSetXAttr() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.setXAttr(PATH, "user.foo", null);
}
@Test
public void testGetXAttrs() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.getXAttrs(PATH);
}
@Test
public void testRemoveXAttr() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.removeXAttr(PATH, "user.foo");
}
@Test
public void testEditLog() throws Exception {
// With XAttrs enabled, set an XAttr.
initCluster(true, true);
fs.mkdirs(PATH);
fs.setXAttr(PATH, "user.foo", null);
// Restart with XAttrs disabled. Expect successful restart.
restart(false, false);
}
@Test
public void testFsImage() throws Exception {
// With XAttrs enabled, set an XAttr.
initCluster(true, true);
fs.mkdirs(PATH);
fs.setXAttr(PATH, "user.foo", null);
// Save a new checkpoint and restart with XAttrs still enabled.
restart(true, true);
// Restart with XAttrs disabled. Expect successful restart.
restart(false, false);
}
/**
* We expect an IOException, and we want the exception text to state the
* configuration key that controls XAttr support.
*/
private void expectException() {
exception.expect(IOException.class);
exception.expectMessage(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY);
}
/**
* Initialize the cluster, wait for it to become active, and get FileSystem.
*
* @param format if true, format the NameNode and DataNodes before starting up
* @param xattrsEnabled if true, XAttr support is enabled
* @throws Exception if any step fails
*/
private void initCluster(boolean format, boolean xattrsEnabled)
throws Exception {
Configuration conf = new Configuration();
// Explicitly enable or disable XAttr support for this cluster.
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, xattrsEnabled);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
}
/**
* Restart the cluster, optionally saving a new checkpoint.
*
* @param checkpoint boolean true to save a new checkpoint
* @param xattrsEnabled if true, XAttr support is enabled
* @throws Exception if restart fails
*/
private void restart(boolean checkpoint, boolean xattrsEnabled)
throws Exception {
NameNode nameNode = cluster.getNameNode();
if (checkpoint) {
NameNodeAdapter.enterSafeMode(nameNode, false);
NameNodeAdapter.saveNamespace(nameNode);
}
shutdown();
initCluster(false, xattrsEnabled);
}
}
| 4,743 | 30.417219 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import static org.apache.hadoop.security.SecurityUtilTestHelper.isExternalKdcRunning;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
/**
* This test brings up a MiniDFSCluster with 1 NameNode and 0
* DataNodes with kerberos authentication enabled using user-specified
* KDC, principals, and keytabs.
*
* To run, users must specify the following system properties:
* externalKdc=true
* java.security.krb5.conf
* dfs.namenode.kerberos.principal
* dfs.namenode.kerberos.internal.spnego.principal
* dfs.namenode.keytab.file
* user.principal (do not specify superuser!)
* user.keytab
*/
public class TestSecureNameNodeWithExternalKdc {
final static private int NUM_OF_DATANODES = 0;
@Before
public void testExternalKdcRunning() {
// Tests are skipped if external KDC is not running.
Assume.assumeTrue(isExternalKdcRunning());
}
@Test
public void testSecureNameNode() throws IOException, InterruptedException {
MiniDFSCluster cluster = null;
try {
String nnPrincipal =
System.getProperty("dfs.namenode.kerberos.principal");
String nnSpnegoPrincipal =
System.getProperty("dfs.namenode.kerberos.internal.spnego.principal");
String nnKeyTab = System.getProperty("dfs.namenode.keytab.file");
assertNotNull("NameNode principal was not specified", nnPrincipal);
assertNotNull("NameNode SPNEGO principal was not specified",
nnSpnegoPrincipal);
assertNotNull("NameNode keytab was not specified", nnKeyTab);
Configuration conf = new HdfsConfiguration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, nnPrincipal);
conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
nnSpnegoPrincipal);
conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nnKeyTab);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES)
.build();
final MiniDFSCluster clusterRef = cluster;
cluster.waitActive();
FileSystem fsForCurrentUser = cluster.getFileSystem();
fsForCurrentUser.mkdirs(new Path("/tmp"));
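// 511 decimal == 0777: make /tmp world-writable so the non-superuser principal can write under it.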
fsForCurrentUser.setPermission(new Path("/tmp"), new FsPermission(
(short) 511));
// The user specified should not be a superuser
String userPrincipal = System.getProperty("user.principal");
String userKeyTab = System.getProperty("user.keytab");
assertNotNull("User principal was not specified", userPrincipal);
assertNotNull("User keytab was not specified", userKeyTab);
UserGroupInformation ugi = UserGroupInformation
.loginUserFromKeytabAndReturnUGI(userPrincipal, userKeyTab);
FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return clusterRef.getFileSystem();
}
});
try {
Path p = new Path("/users");
fs.mkdirs(p);
fail("User must not be allowed to write in /");
} catch (IOException expected) {
}
Path p = new Path("/tmp/alpha");
fs.mkdirs(p);
assertNotNull(fs.listStatus(p));
assertEquals(AuthenticationMethod.KERBEROS,
ugi.getAuthenticationMethod());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 5,044 | 37.807692 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.*;
import static org.hamcrest.CoreMatchers.is;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import java.io.ByteArrayInputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.EnumMap;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.util.Holder;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.junit.Test;
import org.mockito.Mockito;
public class TestEditLogFileInputStream {
private static final byte[] FAKE_LOG_DATA = TestEditLog.HADOOP20_SOME_EDITS;
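/** Serve canned edit log bytes through a mocked HTTP connection and verify they are read back correctly. */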
@Test
public void testReadURL() throws Exception {
HttpURLConnection conn = mock(HttpURLConnection.class);
doReturn(new ByteArrayInputStream(FAKE_LOG_DATA)).when(conn).getInputStream();
doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode();
doReturn(Integer.toString(FAKE_LOG_DATA.length)).when(conn).getHeaderField("Content-Length");
URLConnectionFactory factory = mock(URLConnectionFactory.class);
doReturn(conn).when(factory).openConnection(Mockito.<URL> any(),
anyBoolean());
URL url = new URL("http://localhost/fakeLog");
EditLogInputStream elis = EditLogFileInputStream.fromUrl(factory, url,
HdfsServerConstants.INVALID_TXID, HdfsServerConstants.INVALID_TXID, false);
// Read the edit log and verify that we got all of the data.
EnumMap<FSEditLogOpCodes, Holder<Integer>> counts = FSImageTestUtil
.countEditLogOpTypes(elis);
assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1));
assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1));
assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1));
// Check that length header was picked up.
assertEquals(FAKE_LOG_DATA.length, elis.length());
elis.close();
}
}
| 2,772 | 41.015152 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithXAttr.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.util.EnumSet;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* 1) save xattrs, restart NN, assert xattrs reloaded from edit log,
* 2) save xattrs, create new checkpoint, restart NN, assert xattrs
* reloaded from fsimage
*/
public class TestFSImageWithXAttr {
private static Configuration conf;
private static MiniDFSCluster cluster;
//xattrs
private static final String name1 = "user.a1";
private static final byte[] value1 = {0x31, 0x32, 0x33};
private static final byte[] newValue1 = {0x31, 0x31, 0x31};
private static final String name2 = "user.a2";
private static final byte[] value2 = {0x37, 0x38, 0x39};
private static final String name3 = "user.a3";
private static final byte[] value3 = {};
@BeforeClass
public static void setUp() throws IOException {
conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
}
@AfterClass
public static void tearDown() {
cluster.shutdown();
}
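/**
* Set, replace, and remove xattrs on a file, restarting the NameNode after
* each step (optionally via a saved checkpoint) and verifying the result.
*/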
private void testXAttr(boolean persistNamespace) throws IOException {
Path path = new Path("/p");
DistributedFileSystem fs = cluster.getFileSystem();
fs.create(path).close();
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
restart(fs, persistNamespace);
Map<String, byte[]> xattrs = fs.getXAttrs(path);
Assert.assertEquals(xattrs.size(), 3);
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
Assert.assertArrayEquals(value3, xattrs.get(name3));
fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE));
restart(fs, persistNamespace);
xattrs = fs.getXAttrs(path);
Assert.assertEquals(xattrs.size(), 3);
Assert.assertArrayEquals(newValue1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
Assert.assertArrayEquals(value3, xattrs.get(name3));
fs.removeXAttr(path, name1);
fs.removeXAttr(path, name2);
fs.removeXAttr(path, name3);
restart(fs, persistNamespace);
xattrs = fs.getXAttrs(path);
Assert.assertEquals(xattrs.size(), 0);
}
@Test
public void testPersistXAttr() throws IOException {
testXAttr(true);
}
@Test
public void testXAttrEditLog() throws IOException {
testXAttr(false);
}
/**
* Restart the NameNode, optionally saving a new checkpoint.
*
* @param fs DistributedFileSystem used for saving namespace
* @param persistNamespace boolean true to save a new checkpoint
* @throws IOException if restart fails
*/
private void restart(DistributedFileSystem fs, boolean persistNamespace)
throws IOException {
if (persistNamespace) {
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
}
cluster.restartNameNode();
cluster.waitActive();
}
}
| 4,469 | 32.358209 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption.IMPORT;
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.InetAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.ExitUtil.ExitException;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import javax.management.MBeanServer;
import javax.management.ObjectName;
/**
* Startup and checkpoint tests
*
*/
public class TestStartup {
public static final String NAME_NODE_HOST = "localhost:";
public static final String WILDCARD_HTTP_HOST = "0.0.0.0:";
private static final Log LOG =
LogFactory.getLog(TestStartup.class.getName());
private Configuration config;
private File hdfsDir=null;
static final long seed = 0xAAAAEEFL;
static final int blockSize = 4096;
static final int fileSize = 8192;
private long editsLength=0, fsimageLength=0;
private void writeFile(FileSystem fileSys, Path name, int repl)
throws IOException {
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) repl, blockSize);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
stm.close();
}
@Before
public void setUp() throws Exception {
ExitUtil.disableSystemExit();
ExitUtil.resetFirstExitException();
config = new HdfsConfiguration();
hdfsDir = new File(MiniDFSCluster.getBaseDirectory());
if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
}
LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());
config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
fileAsURI(new File(hdfsDir, "name")).toString());
config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
new File(hdfsDir, "data").getPath());
config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
fileAsURI(new File(hdfsDir, "secondary")).toString());
config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
WILDCARD_HTTP_HOST + "0");
FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
}
/**
* clean up
*/
@After
public void tearDown() throws Exception {
if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
throw new IOException("Could not delete hdfs directory in tearDown '" + hdfsDir + "'");
}
}
/**
* Create a number of fsimage checkpoints
* @param count number of checkpoints to create
* @throws IOException
*/
public void createCheckPoint(int count) throws IOException {
LOG.info("--starting mini cluster");
// manage dirs parameter set to false
MiniDFSCluster cluster = null;
SecondaryNameNode sn = null;
try {
cluster = new MiniDFSCluster.Builder(config)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false).build();
cluster.waitActive();
LOG.info("--starting Secondary Node");
// start secondary node
sn = new SecondaryNameNode(config);
assertNotNull(sn);
// Create count new files and checkpoints
for (int i=0; i<count; i++) {
// create a file
FileSystem fileSys = cluster.getFileSystem();
Path p = new Path("t" + i);
this.writeFile(fileSys, p, 1);
LOG.info("--file " + p.toString() + " created");
LOG.info("--doing checkpoint");
sn.doCheckpoint(); // this shouldn't fail
LOG.info("--done checkpoint");
}
} catch (IOException e) {
fail(StringUtils.stringifyException(e));
System.err.println("checkpoint failed");
throw e;
} finally {
if(sn!=null)
sn.shutdown();
if(cluster!=null)
cluster.shutdown();
LOG.info("--cluster shutdown");
}
}
/**
* Corrupts the MD5 sum of the fsimage.
*
* @param corruptAll
* whether to corrupt one or all of the MD5 sums in the configured
* namedirs
* @throws IOException
*/
private void corruptFSImageMD5(boolean corruptAll) throws IOException {
List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config);
// Corrupt the md5 files in all the namedirs
for (URI uri: nameDirs) {
// Directory layout looks like:
// test/data/dfs/nameN/current/{fsimage,edits,...}
File nameDir = new File(uri.getPath());
File dfsDir = nameDir.getParentFile();
assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir
// Set the md5 file to all zeros
File imageFile = new File(nameDir,
Storage.STORAGE_DIR_CURRENT + "/"
+ NNStorage.getImageFileName(0));
MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16]));
// Only need to corrupt one if !corruptAll
if (!corruptAll) {
break;
}
}
}
/*
* corrupt files by removing and recreating the directory
*/
private void corruptNameNodeFiles() throws IOException {
    // now corrupt/delete the directory
List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config);
List<URI> nameEditsDirs = FSNamesystem.getNamespaceEditsDirs(config);
// get name dir and its length, then delete and recreate the directory
File dir = new File(nameDirs.get(0).getPath()); // has only one
this.fsimageLength = new File(new File(dir, Storage.STORAGE_DIR_CURRENT),
NameNodeFile.IMAGE.getName()).length();
if(dir.exists() && !(FileUtil.fullyDelete(dir)))
throw new IOException("Cannot remove directory: " + dir);
LOG.info("--removed dir "+dir + ";len was ="+ this.fsimageLength);
if (!dir.mkdirs())
throw new IOException("Cannot create directory " + dir);
dir = new File( nameEditsDirs.get(0).getPath()); //has only one
this.editsLength = new File(new File(dir, Storage.STORAGE_DIR_CURRENT),
NameNodeFile.EDITS.getName()).length();
if(dir.exists() && !(FileUtil.fullyDelete(dir)))
throw new IOException("Cannot remove directory: " + dir);
if (!dir.mkdirs())
throw new IOException("Cannot create directory " + dir);
LOG.info("--removed dir and recreated "+dir + ";len was ="+ this.editsLength);
}
/**
   * Start the name-node with the -importCheckpoint option and verify that the files are in separate directories and of the right length
* @throws IOException
*/
private void checkNameNodeFiles() throws IOException{
// start namenode with import option
LOG.info("-- about to start DFS cluster");
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(config)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.startupOption(IMPORT).build();
cluster.waitActive();
LOG.info("--NN started with checkpoint option");
NameNode nn = cluster.getNameNode();
assertNotNull(nn);
// Verify that image file sizes did not change.
FSImage image = nn.getFSImage();
verifyDifferentDirs(image, this.fsimageLength, this.editsLength);
} finally {
if(cluster != null)
cluster.shutdown();
}
}
/**
   * Verify that the edits log and fsimage are in different directories and of the correct size
*/
private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expectedEditsSize) {
StorageDirectory sd =null;
for (Iterator<StorageDirectory> it = img.getStorage().dirIterator(); it.hasNext();) {
sd = it.next();
if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
img.getStorage();
File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0);
LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize);
assertEquals(expectedImgSize, imf.length());
} else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
img.getStorage();
File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0);
LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length() + "; expected = " + expectedEditsSize);
assertEquals(expectedEditsSize, edf.length());
} else {
fail("Image/Edits directories are not different");
}
}
}
/**
* secnn-6
   * checkpoint dirs for edits and image are the same directory
* @throws IOException
*/
@Test
public void testChkpointStartup2() throws IOException{
LOG.info("--starting checkpointStartup2 - same directory for checkpoint");
// different name dirs
config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
fileAsURI(new File(hdfsDir, "name")).toString());
config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
fileAsURI(new File(hdfsDir, "edits")).toString());
// same checkpoint dirs
config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
fileAsURI(new File(hdfsDir, "chkpt")).toString());
config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
fileAsURI(new File(hdfsDir, "chkpt")).toString());
createCheckPoint(1);
corruptNameNodeFiles();
checkNameNodeFiles();
}
/**
   * secnn-8
   * checkpoint dirs for edits and image are different directories
* @throws IOException
*/
@Test
public void testChkpointStartup1() throws IOException{
//setUpConfig();
LOG.info("--starting testStartup Recovery");
// different name dirs
config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
fileAsURI(new File(hdfsDir, "name")).toString());
config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
fileAsURI(new File(hdfsDir, "edits")).toString());
    // different checkpoint dirs
config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
fileAsURI(new File(hdfsDir, "chkpt_edits")).toString());
config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
fileAsURI(new File(hdfsDir, "chkpt")).toString());
createCheckPoint(1);
corruptNameNodeFiles();
checkNameNodeFiles();
}
/**
* secnn-7
* secondary node copies fsimage and edits into correct separate directories.
* @throws IOException
*/
@Test
public void testSNNStartup() throws IOException{
//setUpConfig();
LOG.info("--starting SecondNN startup test");
    // image and edits share the same name dir
config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
fileAsURI(new File(hdfsDir, "name")).toString());
config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
fileAsURI(new File(hdfsDir, "name")).toString());
    // different checkpoint dirs
config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
fileAsURI(new File(hdfsDir, "chkpt_edits")).toString());
config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
fileAsURI(new File(hdfsDir, "chkpt")).toString());
LOG.info("--starting NN ");
MiniDFSCluster cluster = null;
SecondaryNameNode sn = null;
NameNode nn = null;
try {
cluster = new MiniDFSCluster.Builder(config).manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.build();
cluster.waitActive();
nn = cluster.getNameNode();
assertNotNull(nn);
// start secondary node
LOG.info("--starting SecondNN");
sn = new SecondaryNameNode(config);
assertNotNull(sn);
LOG.info("--doing checkpoint");
sn.doCheckpoint(); // this shouldn't fail
LOG.info("--done checkpoint");
// now verify that image and edits are created in the different directories
FSImage image = nn.getFSImage();
StorageDirectory sd = image.getStorage().getStorageDir(0); //only one
assertEquals(sd.getStorageDirType(), NameNodeDirType.IMAGE_AND_EDITS);
image.getStorage();
File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0);
image.getStorage();
File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0);
LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length());
LOG.info("--edits file " + edf.getAbsolutePath() + "; len = " + edf.length());
FSImage chkpImage = sn.getFSImage();
verifyDifferentDirs(chkpImage, imf.length(), edf.length());
} catch (IOException e) {
fail(StringUtils.stringifyException(e));
System.err.println("checkpoint failed");
throw e;
} finally {
if(sn!=null)
sn.shutdown();
if(cluster!=null)
cluster.shutdown();
}
}
@Test(timeout = 30000)
public void testSNNStartupWithRuntimeException() throws Exception {
String[] argv = new String[] { "-checkpoint" };
try {
SecondaryNameNode.main(argv);
fail("Failed to handle runtime exceptions during SNN startup!");
} catch (ExitException ee) {
GenericTestUtils.assertExceptionContains("ExitException", ee);
assertTrue("Didn't termiated properly ", ExitUtil.terminateCalled());
}
}
@Test
public void testCompression() throws IOException {
LOG.info("Test compressing image.");
Configuration conf = new Configuration();
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
File base_dir = new File(PathUtils.getTestDir(getClass()), "dfs/");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
new File(base_dir, "name").getPath());
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
DFSTestUtil.formatNameNode(conf);
// create an uncompressed image
LOG.info("Create an uncompressed fsimage");
NameNode namenode = new NameNode(conf);
namenode.getNamesystem().mkdirs("/test",
new PermissionStatus("hairong", null, FsPermission.getDefault()), true);
NamenodeProtocols nnRpc = namenode.getRpcServer();
assertTrue(nnRpc.getFileInfo("/test").isDir());
nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
nnRpc.saveNamespace();
namenode.stop();
namenode.join();
// compress image using default codec
LOG.info("Read an uncomressed image and store it compressed using default codec.");
conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
checkNameSpace(conf);
// read image compressed using the default and compress it using Gzip codec
LOG.info("Read a compressed image and store it using a different codec.");
conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
"org.apache.hadoop.io.compress.GzipCodec");
checkNameSpace(conf);
// read an image compressed in Gzip and store it uncompressed
LOG.info("Read a compressed image and store it as uncompressed.");
conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
checkNameSpace(conf);
    // read an uncompressed image and store it uncompressed
LOG.info("Read an uncompressed image and store it as uncompressed.");
checkNameSpace(conf);
}
private void checkNameSpace(Configuration conf) throws IOException {
NameNode namenode = new NameNode(conf);
NamenodeProtocols nnRpc = namenode.getRpcServer();
assertTrue(nnRpc.getFileInfo("/test").isDir());
nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
nnRpc.saveNamespace();
namenode.stop();
namenode.join();
}
@Test
public void testImageChecksum() throws Exception {
LOG.info("Test uncompressed image checksum");
testImageChecksum(false);
LOG.info("Test compressed image checksum");
testImageChecksum(true);
}
private void testImageChecksum(boolean compress) throws Exception {
MiniDFSCluster cluster = null;
if (compress) {
      // DFS_IMAGE_COMPRESS_KEY is the boolean switch that enables fsimage compression
      config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
}
try {
LOG.info("\n===========================================\n" +
"Starting empty cluster");
cluster = new MiniDFSCluster.Builder(config)
.numDataNodes(0)
.format(true)
.build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
fs.mkdirs(new Path("/test"));
LOG.info("Shutting down cluster #1");
cluster.shutdown();
cluster = null;
// Corrupt the md5 files in all the namedirs
corruptFSImageMD5(true);
// Attach our own log appender so we can verify output
final LogVerificationAppender appender = new LogVerificationAppender();
final Logger logger = Logger.getRootLogger();
logger.addAppender(appender);
// Try to start a new cluster
LOG.info("\n===========================================\n" +
"Starting same cluster after simulated crash");
try {
cluster = new MiniDFSCluster.Builder(config)
.numDataNodes(0)
.format(false)
.build();
fail("Should not have successfully started with corrupt image");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"Failed to load an FSImage file!", ioe);
int md5failures = appender.countExceptionsWithMessage(
" is corrupt with MD5 checksum of ");
// Two namedirs, so should have seen two failures
assertEquals(2, md5failures);
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test(timeout=30000)
public void testCorruptImageFallback() throws IOException {
// Create two checkpoints
createCheckPoint(2);
// Delete a single md5sum
corruptFSImageMD5(false);
// Should still be able to start
MiniDFSCluster cluster = new MiniDFSCluster.Builder(config)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.build();
try {
cluster.waitActive();
} finally {
cluster.shutdown();
}
}
/**
   * This test verifies that the hosts include list may contain host names. After
   * the namenode restarts, the datanodes that are still alive should have no
   * trouble registering again.
*/
@Test
public void testNNRestart() throws IOException, InterruptedException {
MiniDFSCluster cluster = null;
FileSystem localFileSys;
Path hostsFile;
Path excludeFile;
int HEARTBEAT_INTERVAL = 1; // heartbeat interval in seconds
// Set up the hosts/exclude files.
localFileSys = FileSystem.getLocal(config);
Path workingDir = localFileSys.getWorkingDirectory();
Path dir = new Path(workingDir, "build/test/data/work-dir/restartnn");
hostsFile = new Path(dir, "hosts");
excludeFile = new Path(dir, "exclude");
// Setup conf
config.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
writeConfigFile(localFileSys, excludeFile, null);
config.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
// write into hosts file
ArrayList<String>list = new ArrayList<String>();
byte b[] = {127, 0, 0, 1};
InetAddress inetAddress = InetAddress.getByAddress(b);
list.add(inetAddress.getHostName());
writeConfigFile(localFileSys, hostsFile, list);
int numDatanodes = 1;
try {
cluster = new MiniDFSCluster.Builder(config)
.numDataNodes(numDatanodes).setupHostsFile(true).build();
cluster.waitActive();
cluster.restartNameNode();
NamenodeProtocols nn = cluster.getNameNodeRpc();
assertNotNull(nn);
assertTrue(cluster.isDataNodeUp());
DatanodeInfo[] info = nn.getDatanodeReport(DatanodeReportType.LIVE);
for (int i = 0 ; i < 5 && info.length != numDatanodes; i++) {
Thread.sleep(HEARTBEAT_INTERVAL * 1000);
info = nn.getDatanodeReport(DatanodeReportType.LIVE);
}
assertEquals("Number of live nodes should be "+numDatanodes, numDatanodes,
info.length);
} catch (IOException e) {
fail(StringUtils.stringifyException(e));
throw e;
} finally {
cleanupFile(localFileSys, excludeFile.getParent());
if (cluster != null) {
cluster.shutdown();
}
}
}
private void writeConfigFile(FileSystem localFileSys, Path name,
ArrayList<String> nodes) throws IOException {
// delete if it already exists
if (localFileSys.exists(name)) {
localFileSys.delete(name, true);
}
FSDataOutputStream stm = localFileSys.create(name);
if (nodes != null) {
for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
String node = it.next();
stm.writeBytes(node);
stm.writeBytes("\n");
}
}
stm.close();
}
private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
assertTrue(fileSys.exists(name));
fileSys.delete(name, true);
assertTrue(!fileSys.exists(name));
}
@Test(timeout = 120000)
public void testXattrConfiguration() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, -1);
cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
fail("Expected exception with negative xattr size");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"Cannot set a negative value for the maximum size of an xattr", e);
} finally {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
if (cluster != null) {
cluster.shutdown();
}
}
try {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, -1);
cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
fail("Expected exception with negative # xattrs per inode");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"Cannot set a negative limit on the number of xattrs per inode", e);
} finally {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,
DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);
if (cluster != null) {
cluster.shutdown();
}
}
try {
// Set up a logger to check log message
final LogVerificationAppender appender = new LogVerificationAppender();
final Logger logger = Logger.getRootLogger();
logger.addAppender(appender);
int count = appender.countLinesWithMessage(
"Maximum size of an xattr: 0 (unlimited)");
assertEquals("Expected no messages about unlimited xattr size", 0, count);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, 0);
cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
count = appender.countLinesWithMessage(
"Maximum size of an xattr: 0 (unlimited)");
// happens twice because we format then run
assertEquals("Expected unlimited xattr size", 2, count);
} finally {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Verify the following scenario.
* 1. NN restarts.
* 2. Heartbeat RPC will retry and succeed. NN asks DN to reregister.
* 3. After reregistration completes, DN will send Heartbeat, followed by
* Blockreport.
* 4. NN will mark DatanodeStorageInfo#blockContentsStale to false.
* @throws Exception
*/
@Test(timeout = 60000)
public void testStorageBlockContentsStaleAfterNNRestart() throws Exception {
MiniDFSCluster dfsCluster = null;
try {
Configuration config = new Configuration();
dfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
dfsCluster.waitActive();
dfsCluster.restartNameNode(true);
BlockManagerTestUtil.checkHeartbeat(
dfsCluster.getNamesystem().getBlockManager());
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanNameFsns = new ObjectName(
"Hadoop:service=NameNode,name=FSNamesystemState");
Integer numStaleStorages = (Integer) (mbs.getAttribute(
mxbeanNameFsns, "NumStaleStorages"));
assertEquals(0, numStaleStorages.intValue());
} finally {
if (dfsCluster != null) {
dfsCluster.shutdown();
}
}
return;
}
}
| 27,798 | 35.577632 | 126 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.Storage;
/**
*
* CreateEditsLog
* Synopsis: CreateEditsLog -f numFiles StartingBlockId numBlocksPerFile
* [-r replicafactor] [-d editsLogDirectory]
* Default replication factor is 1
* Default edits log directory is /tmp/EditsLogOut
*
* Create a name node's edits log in /tmp/EditsLogOut.
* The file /tmp/EditsLogOut/current/edits can be copied to a name node's
 * dfs.namenode.name.dir/current directory and the name node can be started as usual.
*
* The files are created in /createdViaInjectingInEditsLog
 * The file names contain the starting and ending blockIds; hence one can
 * create multiple edits logs using this command with non-overlapping
 * block ids and feed the files to a single name node.
*
* See Also @link #DataNodeCluster for injecting a set of matching
* blocks created with this command into a set of simulated data nodes.
*
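 * Example (all values below are illustrative, not prescriptive): to generate an
 * edits log describing 1000 single-block files starting at block id 0, with
 * replication 1, in the default output directory:
 * <pre>
 *   CreateEditsLog -f 1000 0 1 -r 1 -d /tmp/EditsLogOut
 * </pre>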
*/
public class CreateEditsLog {
static final String BASE_PATH = "/createdViaInjectingInEditsLog";
static final String EDITS_DIR = "/tmp/EditsLogOut";
static String edits_dir = EDITS_DIR;
static final public long BLOCK_GENERATION_STAMP =
GenerationStamp.LAST_RESERVED_STAMP;
static void addFiles(FSEditLog editLog, int numFiles, short replication,
int blocksPerFile, long startingBlockId, long blockSize,
FileNameGenerator nameGenerator) {
PermissionStatus p = new PermissionStatus("joeDoe", "people",
new FsPermission((short)0777));
INodeId inodeId = new INodeId();
INodeDirectory dirInode = new INodeDirectory(inodeId.nextValue(), null, p,
0L);
editLog.logMkDir(BASE_PATH, dirInode);
BlockInfo[] blocks = new BlockInfo[blocksPerFile];
for (int iB = 0; iB < blocksPerFile; ++iB) {
blocks[iB] =
new BlockInfoContiguous(new Block(0, blockSize, BLOCK_GENERATION_STAMP),
replication);
}
long currentBlockId = startingBlockId;
long bidAtSync = startingBlockId;
for (int iF = 0; iF < numFiles; iF++) {
for (int iB = 0; iB < blocksPerFile; ++iB) {
blocks[iB].setBlockId(currentBlockId++);
}
final INodeFile inode = new INodeFile(inodeId.nextValue(), null,
p, 0L, 0L, blocks, replication, blockSize, (byte)0);
inode.toUnderConstruction("", "");
// Append path to filename with information about blockIDs
String path = "_" + iF + "_B" + blocks[0].getBlockId() +
"_to_B" + blocks[blocksPerFile-1].getBlockId() + "_";
String filePath = nameGenerator.getNextFileName("");
filePath = filePath + path;
// Log the new sub directory in edits
if ((iF % nameGenerator.getFilesPerDirectory()) == 0) {
String currentDir = nameGenerator.getCurrentDir();
dirInode = new INodeDirectory(inodeId.nextValue(), null, p, 0L);
editLog.logMkDir(currentDir, dirInode);
}
INodeFile fileUc = new INodeFile(inodeId.nextValue(), null,
p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
fileUc.toUnderConstruction("", "");
editLog.logOpenFile(filePath, fileUc, false, false);
editLog.logCloseFile(filePath, inode);
if (currentBlockId - bidAtSync >= 2000) { // sync every 2K blocks
editLog.logSync();
bidAtSync = currentBlockId;
}
}
System.out.println("Created edits log in directory " + edits_dir);
System.out.println(" containing " +
numFiles + " File-Creates, each file with " + blocksPerFile + " blocks");
System.out.println(" blocks range: " +
startingBlockId + " to " + (currentBlockId-1));
}
static final String usage = "Usage: createditlogs " +
" -f numFiles startingBlockIds NumBlocksPerFile [-r replicafactor] " +
"[-d editsLogDirectory]\n" +
" Default replication factor is 1\n" +
" Default edits log direcory is " + EDITS_DIR + "\n";
static void printUsageExit() {
System.out.println(usage);
System.exit(-1);
}
static void printUsageExit(String err) {
System.out.println(err);
printUsageExit();
}
/**
* @param args arguments
* @throws IOException
*/
public static void main(String[] args) throws IOException {
long startingBlockId = 1;
int numFiles = 0;
short replication = 1;
int numBlocksPerFile = 0;
long blockSize = 10;
if (args.length == 0) {
printUsageExit();
}
for (int i = 0; i < args.length; i++) { // parse command line
if (args[i].equals("-h"))
printUsageExit();
if (args[i].equals("-f")) {
if (i + 3 >= args.length || args[i+1].startsWith("-") ||
args[i+2].startsWith("-") || args[i+3].startsWith("-")) {
printUsageExit(
"Missing num files, starting block and/or number of blocks");
}
numFiles = Integer.parseInt(args[++i]);
startingBlockId = Integer.parseInt(args[++i]);
numBlocksPerFile = Integer.parseInt(args[++i]);
if (numFiles <=0 || numBlocksPerFile <= 0) {
printUsageExit("numFiles and numBlocksPerFile most be greater than 0");
}
} else if (args[i].equals("-l")) {
if (i + 1 >= args.length) {
printUsageExit(
"Missing block length");
}
blockSize = Long.parseLong(args[++i]);
} else if (args[i].equals("-r") || args[i+1].startsWith("-")) {
if (i + 1 >= args.length) {
printUsageExit(
"Missing replication factor");
}
replication = Short.parseShort(args[++i]);
} else if (args[i].equals("-d")) {
if (i + 1 >= args.length || args[i+1].startsWith("-")) {
printUsageExit("Missing edits logs directory");
}
edits_dir = args[++i];
} else {
printUsageExit();
}
}
File editsLogDir = new File(edits_dir);
File subStructureDir = new File(edits_dir + "/" +
Storage.STORAGE_DIR_CURRENT);
if ( !editsLogDir.exists() ) {
if ( !editsLogDir.mkdir()) {
System.out.println("cannot create " + edits_dir);
System.exit(-1);
}
}
if ( !subStructureDir.exists() ) {
if ( !subStructureDir.mkdir()) {
System.out.println("cannot create subdirs of " + edits_dir);
System.exit(-1);
}
}
FileNameGenerator nameGenerator = new FileNameGenerator(BASE_PATH, 100);
FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
editLog.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
addFiles(editLog, numFiles, replication, numBlocksPerFile, startingBlockId,
blockSize, nameGenerator);
editLog.logSync();
editLog.close();
}
}
| 8,118 | 37.117371 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
import javax.management.MBeanException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestSecondaryWebUi {
private static MiniDFSCluster cluster;
private static SecondaryNameNode snn;
private static final Configuration conf = new Configuration();
@BeforeClass
public static void setUpCluster() throws IOException {
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
"0.0.0.0:0");
conf.setLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 500);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.build();
cluster.waitActive();
snn = new SecondaryNameNode(conf);
}
@AfterClass
public static void shutDownCluster() {
if (cluster != null) {
cluster.shutdown();
}
if (snn != null) {
snn.shutdown();
}
}
@Test
public void testSecondaryWebUi()
throws IOException, MalformedObjectNameException,
AttributeNotFoundException, MBeanException,
ReflectionException, InstanceNotFoundException {
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=SecondaryNameNode,name=SecondaryNameNodeInfo");
String[] checkpointDir = (String[]) mbs.getAttribute(mxbeanName,
"CheckpointDirectories");
Assert.assertArrayEquals(checkpointDir, snn.getCheckpointDirectories());
String[] checkpointEditlogDir = (String[]) mbs.getAttribute(mxbeanName,
"CheckpointEditlogDirectories");
Assert.assertArrayEquals(checkpointEditlogDir,
snn.getCheckpointEditlogDirectories());
}
}
| 3,142 | 35.126437 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.DefaultAuditLogger;
import org.apache.log4j.Level;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import java.net.Inet4Address;
import java.util.Arrays;
import java.util.List;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.*;
/**
* Test that the HDFS Audit logger respects DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST.
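 * <p/>
 * A minimal configuration sketch, using the same dummy command names as the
 * tests below (a real deployment would list actual RPC command names instead):
 * <pre>
 *   Configuration conf = new HdfsConfiguration();
 *   conf.set(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST,
 *       "dummycommand1,dummycommand2");
 *   // commands in this list are audit-logged only when the audit log is at DEBUG
 * </pre>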
*/
public class TestAuditLogAtDebug {
static final Log LOG = LogFactory.getLog(TestAuditLogAtDebug.class);
@Rule
public Timeout timeout = new Timeout(300000);
private static final String DUMMY_COMMAND_1 = "dummycommand1";
private static final String DUMMY_COMMAND_2 = "dummycommand2";
private DefaultAuditLogger makeSpyLogger(
Level level, Optional<List<String>> debugCommands) {
DefaultAuditLogger logger = new DefaultAuditLogger();
Configuration conf = new HdfsConfiguration();
if (debugCommands.isPresent()) {
conf.set(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST,
Joiner.on(",").join(debugCommands.get()));
}
logger.initialize(conf);
((Log4JLogger) FSNamesystem.auditLog).getLogger().setLevel(level);
return spy(logger);
}
private void logDummyCommandToAuditLog(HdfsAuditLogger logger, String command) {
logger.logAuditEvent(true, "",
Inet4Address.getLoopbackAddress(),
command, "", "",
null, null, null);
}
@Test
public void testDebugCommandNotLoggedAtInfo() {
DefaultAuditLogger logger =
makeSpyLogger(
Level.INFO, Optional.of(Arrays.asList(DUMMY_COMMAND_1)));
logDummyCommandToAuditLog(logger, DUMMY_COMMAND_1);
verify(logger, never()).logAuditMessage(anyString());
}
@Test
public void testDebugCommandLoggedAtDebug() {
DefaultAuditLogger logger =
makeSpyLogger(
Level.DEBUG, Optional.of(Arrays.asList(DUMMY_COMMAND_1)));
logDummyCommandToAuditLog(logger, DUMMY_COMMAND_1);
verify(logger, times(1)).logAuditMessage(anyString());
}
@Test
public void testInfoCommandLoggedAtInfo() {
DefaultAuditLogger logger =
makeSpyLogger(
Level.INFO, Optional.of(Arrays.asList(DUMMY_COMMAND_1)));
logDummyCommandToAuditLog(logger, DUMMY_COMMAND_2);
verify(logger, times(1)).logAuditMessage(anyString());
}
@Test
public void testMultipleDebugCommandsNotLoggedAtInfo() {
DefaultAuditLogger logger =
makeSpyLogger(
Level.INFO,
Optional.of(Arrays.asList(DUMMY_COMMAND_1, DUMMY_COMMAND_2)));
logDummyCommandToAuditLog(logger, DUMMY_COMMAND_1);
logDummyCommandToAuditLog(logger, DUMMY_COMMAND_2);
verify(logger, never()).logAuditMessage(anyString());
}
@Test
public void testMultipleDebugCommandsLoggedAtDebug() {
DefaultAuditLogger logger =
makeSpyLogger(
Level.DEBUG,
Optional.of(Arrays.asList(DUMMY_COMMAND_1, DUMMY_COMMAND_2)));
logDummyCommandToAuditLog(logger, DUMMY_COMMAND_1);
logDummyCommandToAuditLog(logger, DUMMY_COMMAND_2);
verify(logger, times(2)).logAuditMessage(anyString());
}
@Test
public void testEmptyDebugCommands() {
DefaultAuditLogger logger = makeSpyLogger(
Level.INFO, Optional.<List<String>>absent());
logDummyCommandToAuditLog(logger, DUMMY_COMMAND_1);
logDummyCommandToAuditLog(logger, DUMMY_COMMAND_2);
verify(logger, times(2)).logAuditMessage(anyString());
}
}
| 4,764 | 35.098485 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import com.google.common.base.Preconditions;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.util.VersionInfo;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
/**
* Main class for a series of name-node benchmarks.
*
* Each benchmark measures throughput and average execution time
* of a specific name-node operation, e.g. file creation or block reports.
*
* The benchmark does not involve any other hadoop components
* except for the name-node. Each operation is executed
* by calling directly the respective name-node method.
 * The name-node here is real; all other components are simulated.
*
* Command line arguments for the benchmark include:
* <ol>
* <li>total number of operations to be performed,</li>
* <li>number of threads to run these operations,</li>
* <li>followed by operation specific input parameters.</li>
* <li>-logLevel L specifies the logging level when the benchmark runs.
* The default logging level is {@link Level#ERROR}.</li>
* <li>-UGCacheRefreshCount G will cause the benchmark to call
* {@link NameNodeRpcServer#refreshUserToGroupsMappings} after
* every G operations, which purges the name-node's user group cache.
* By default the refresh is never called.</li>
* <li>-keepResults do not clean up the name-space after execution.</li>
* <li>-useExisting do not recreate the name-space, use existing data.</li>
* <li>-namenode will run the test against a namenode in another
* process or on another host. If you use this option, the namenode
* must have dfs.namenode.fs-limits.min-block-size set to 16.</li>
* </ol>
*
* The benchmark first generates inputs for each thread so that the
 * input generation overhead does not affect the resulting statistics.
* The number of operations performed by threads is practically the same.
* Precisely, the difference between the number of operations
* performed by any two threads does not exceed 1.
*
* Then the benchmark executes the specified number of operations using
* the specified number of threads and outputs the resulting stats.
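 *
 * As an illustrative sketch only (the operation name and flags are taken from
 * the usage strings defined in this file; the exact launcher command may differ
 * between deployments), a run of the file-creation benchmark could look like:
 * <pre>
 *   hadoop org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark \
 *       -op create -threads 3 -files 10 -filesPerDir 4 -close -logLevel ERROR
 * </pre>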
*/
public class NNThroughputBenchmark implements Tool {
private static final Log LOG = LogFactory.getLog(NNThroughputBenchmark.class);
private static final int BLOCK_SIZE = 16;
private static final String GENERAL_OPTIONS_USAGE =
" [-keepResults] | [-logLevel L] | [-UGCacheRefreshCount G] |" +
" [-namenode <namenode URI>]\n" +
" If using -namenode, set the namenode's" +
" dfs.namenode.fs-limits.min-block-size to 16.";
static Configuration config;
static NameNode nameNode;
static NamenodeProtocol nameNodeProto;
static ClientProtocol clientProto;
static DatanodeProtocol dataNodeProto;
static RefreshUserMappingsProtocol refreshUserMappingsProto;
static String bpid = null;
private String namenodeUri = null; // NN URI to use, if specified
NNThroughputBenchmark(Configuration conf) throws IOException {
config = conf;
// We do not need many handlers, since each thread simulates a handler
// by calling name-node methods directly
config.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 1);
// Turn off minimum block size verification
config.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
// set exclude file
config.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,
"${hadoop.tmp.dir}/dfs/hosts/exclude");
File excludeFile = new File(config.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE,
"exclude"));
if(!excludeFile.exists()) {
if(!excludeFile.getParentFile().exists() && !excludeFile.getParentFile().mkdirs())
throw new IOException("NNThroughputBenchmark: cannot mkdir " + excludeFile);
}
new FileOutputStream(excludeFile).close();
// set include file
config.set(DFSConfigKeys.DFS_HOSTS, "${hadoop.tmp.dir}/dfs/hosts/include");
File includeFile = new File(config.get(DFSConfigKeys.DFS_HOSTS, "include"));
new FileOutputStream(includeFile).close();
}
void close() {
if(nameNode != null)
nameNode.stop();
}
static void setNameNodeLoggingLevel(Level logLevel) {
LOG.fatal("Log level = " + logLevel.toString());
// change log level to NameNode logs
DFSTestUtil.setNameNodeLogLevel(logLevel);
GenericTestUtils.setLogLevel(LogManager.getLogger(
NetworkTopology.class.getName()), logLevel);
GenericTestUtils.setLogLevel(LogManager.getLogger(
Groups.class.getName()), logLevel);
}
/**
* Base class for collecting operation statistics.
*
   * Subclass this class in order to collect statistics for a
   * specific name-node operation.
*/
abstract class OperationStatsBase {
protected static final String BASE_DIR_NAME = "/nnThroughputBenchmark";
protected static final String OP_ALL_NAME = "all";
protected static final String OP_ALL_USAGE = "-op all <other ops options>";
protected final String baseDir;
protected short replication;
protected int numThreads = 0; // number of threads
protected int numOpsRequired = 0; // number of operations requested
protected int numOpsExecuted = 0; // number of operations executed
protected long cumulativeTime = 0; // sum of times for each op
protected long elapsedTime = 0; // time from start to finish
protected boolean keepResults = false;// don't clean base directory on exit
protected Level logLevel; // logging level, ERROR by default
protected int ugcRefreshCount = 0; // user group cache refresh count
protected List<StatsDaemon> daemons;
/**
* Operation name.
*/
abstract String getOpName();
/**
* Parse command line arguments.
*
* @param args arguments
* @throws IOException
*/
abstract void parseArguments(List<String> args) throws IOException;
/**
* Generate inputs for each daemon thread.
*
* @param opsPerThread number of inputs for each thread.
* @throws IOException
*/
abstract void generateInputs(int[] opsPerThread) throws IOException;
/**
* This corresponds to the arg1 argument of
* {@link #executeOp(int, int, String)}, which can have different meanings
* depending on the operation performed.
*
* @param daemonId id of the daemon calling this method
* @return the argument
*/
abstract String getExecutionArgument(int daemonId);
/**
* Execute name-node operation.
*
* @param daemonId id of the daemon calling this method.
     * @param inputIdx serial index of the operation called by the daemon.
* @param arg1 operation specific argument.
* @return time of the individual name-node call.
* @throws IOException
*/
abstract long executeOp(int daemonId, int inputIdx, String arg1) throws IOException;
/**
* Print the results of the benchmarking.
*/
abstract void printResults();
OperationStatsBase() {
baseDir = BASE_DIR_NAME + "/" + getOpName();
replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
numOpsRequired = 10;
numThreads = 3;
logLevel = Level.ERROR;
ugcRefreshCount = Integer.MAX_VALUE;
}
void benchmark() throws IOException {
daemons = new ArrayList<StatsDaemon>();
long start = 0;
try {
numOpsExecuted = 0;
cumulativeTime = 0;
if(numThreads < 1)
return;
int tIdx = 0; // thread index < nrThreads
int opsPerThread[] = new int[numThreads];
for(int opsScheduled = 0; opsScheduled < numOpsRequired;
opsScheduled += opsPerThread[tIdx++]) {
// execute in a separate thread
opsPerThread[tIdx] = (numOpsRequired-opsScheduled)/(numThreads-tIdx);
if(opsPerThread[tIdx] == 0)
opsPerThread[tIdx] = 1;
}
// if numThreads > numOpsRequired then the remaining threads will do nothing
for(; tIdx < numThreads; tIdx++)
opsPerThread[tIdx] = 0;
generateInputs(opsPerThread);
setNameNodeLoggingLevel(logLevel);
for(tIdx=0; tIdx < numThreads; tIdx++)
daemons.add(new StatsDaemon(tIdx, opsPerThread[tIdx], this));
start = Time.now();
LOG.info("Starting " + numOpsRequired + " " + getOpName() + "(s).");
for(StatsDaemon d : daemons)
d.start();
} finally {
while(isInProgress()) {
// try {Thread.sleep(500);} catch (InterruptedException e) {}
}
elapsedTime = Time.now() - start;
for(StatsDaemon d : daemons) {
incrementStats(d.localNumOpsExecuted, d.localCumulativeTime);
// System.out.println(d.toString() + ": ops Exec = " + d.localNumOpsExecuted);
}
}
}
private boolean isInProgress() {
for(StatsDaemon d : daemons)
if(d.isInProgress())
return true;
return false;
}
void cleanUp() throws IOException {
clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
false);
if(!keepResults)
clientProto.delete(getBaseDir(), true);
}
int getNumOpsExecuted() {
return numOpsExecuted;
}
long getCumulativeTime() {
return cumulativeTime;
}
long getElapsedTime() {
return elapsedTime;
}
long getAverageTime() {
return numOpsExecuted == 0 ? 0 : cumulativeTime / numOpsExecuted;
}
double getOpsPerSecond() {
return elapsedTime == 0 ? 0 : 1000*(double)numOpsExecuted / elapsedTime;
}
String getBaseDir() {
return baseDir;
}
String getClientName(int idx) {
return getOpName() + "-client-" + idx;
}
void incrementStats(int ops, long time) {
numOpsExecuted += ops;
cumulativeTime += time;
}
/**
* Parse first 2 arguments, corresponding to the "-op" option.
*
* @param args argument list
* @return true if operation is all, which means that options not related
* to this operation should be ignored, or false otherwise, meaning
* that usage should be printed when an unrelated option is encountered.
*/
protected boolean verifyOpArgument(List<String> args) {
if(args.size() < 2 || ! args.get(0).startsWith("-op"))
printUsage();
// process common options
int krIndex = args.indexOf("-keepResults");
keepResults = (krIndex >= 0);
if(keepResults) {
args.remove(krIndex);
}
int llIndex = args.indexOf("-logLevel");
if(llIndex >= 0) {
if(args.size() <= llIndex + 1)
printUsage();
logLevel = Level.toLevel(args.get(llIndex+1), Level.ERROR);
args.remove(llIndex+1);
args.remove(llIndex);
}
int ugrcIndex = args.indexOf("-UGCacheRefreshCount");
if(ugrcIndex >= 0) {
if(args.size() <= ugrcIndex + 1)
printUsage();
int g = Integer.parseInt(args.get(ugrcIndex+1));
if(g > 0) ugcRefreshCount = g;
args.remove(ugrcIndex+1);
args.remove(ugrcIndex);
}
try {
namenodeUri = StringUtils.popOptionWithArgument("-namenode", args);
} catch (IllegalArgumentException iae) {
printUsage();
}
String type = args.get(1);
if(OP_ALL_NAME.equals(type)) {
type = getOpName();
return true;
}
if(!getOpName().equals(type))
printUsage();
return false;
}
void printStats() {
LOG.info("--- " + getOpName() + " stats ---");
LOG.info("# operations: " + getNumOpsExecuted());
LOG.info("Elapsed Time: " + getElapsedTime());
LOG.info(" Ops per sec: " + getOpsPerSecond());
LOG.info("Average Time: " + getAverageTime());
}
}
/**
* One of the threads that perform stats operations.
*/
private class StatsDaemon extends Thread {
private final int daemonId;
private int opsPerThread;
private String arg1; // argument passed to executeOp()
private volatile int localNumOpsExecuted = 0;
private volatile long localCumulativeTime = 0;
private final OperationStatsBase statsOp;
StatsDaemon(int daemonId, int nrOps, OperationStatsBase op) {
this.daemonId = daemonId;
this.opsPerThread = nrOps;
this.statsOp = op;
setName(toString());
}
@Override
public void run() {
localNumOpsExecuted = 0;
localCumulativeTime = 0;
arg1 = statsOp.getExecutionArgument(daemonId);
try {
benchmarkOne();
} catch(IOException ex) {
LOG.error("StatsDaemon " + daemonId + " failed: \n"
+ StringUtils.stringifyException(ex));
}
}
@Override
public String toString() {
return "StatsDaemon-" + daemonId;
}
void benchmarkOne() throws IOException {
for(int idx = 0; idx < opsPerThread; idx++) {
if((localNumOpsExecuted+1) % statsOp.ugcRefreshCount == 0)
refreshUserMappingsProto.refreshUserToGroupsMappings();
long stat = statsOp.executeOp(daemonId, idx, arg1);
localNumOpsExecuted++;
localCumulativeTime += stat;
}
}
boolean isInProgress() {
return localNumOpsExecuted < opsPerThread;
}
/**
* Schedule to stop this daemon.
*/
void terminate() {
opsPerThread = localNumOpsExecuted;
}
}
/**
* Clean all benchmark result directories.
*/
class CleanAllStats extends OperationStatsBase {
// Operation types
static final String OP_CLEAN_NAME = "clean";
static final String OP_CLEAN_USAGE = "-op clean";
CleanAllStats(List<String> args) {
super();
parseArguments(args);
numOpsRequired = 1;
numThreads = 1;
keepResults = true;
}
@Override
String getOpName() {
return OP_CLEAN_NAME;
}
@Override
void parseArguments(List<String> args) {
boolean ignoreUnrelatedOptions = verifyOpArgument(args);
if(args.size() > 2 && !ignoreUnrelatedOptions)
printUsage();
}
@Override
void generateInputs(int[] opsPerThread) throws IOException {
// do nothing
}
/**
* Does not require the argument
*/
@Override
String getExecutionArgument(int daemonId) {
return null;
}
/**
* Remove entire benchmark directory.
*/
@Override
long executeOp(int daemonId, int inputIdx, String ignore)
throws IOException {
clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
false);
long start = Time.now();
clientProto.delete(BASE_DIR_NAME, true);
long end = Time.now();
return end-start;
}
@Override
void printResults() {
LOG.info("--- " + getOpName() + " inputs ---");
LOG.info("Remove directory " + BASE_DIR_NAME);
printStats();
}
}
/**
* File creation statistics.
*
* Each thread creates the same (+ or -1) number of files.
* File names are pre-generated during initialization.
* The created files do not have blocks.
*/
class CreateFileStats extends OperationStatsBase {
// Operation types
static final String OP_CREATE_NAME = "create";
static final String OP_CREATE_USAGE =
"-op create [-threads T] [-files N] [-filesPerDir P] [-close]";
protected FileNameGenerator nameGenerator;
protected String[][] fileNames;
private boolean closeUponCreate;
CreateFileStats(List<String> args) {
super();
parseArguments(args);
}
@Override
String getOpName() {
return OP_CREATE_NAME;
}
@Override
void parseArguments(List<String> args) {
boolean ignoreUnrelatedOptions = verifyOpArgument(args);
int nrFilesPerDir = 4;
closeUponCreate = false;
for (int i = 2; i < args.size(); i++) { // parse command line
if(args.get(i).equals("-files")) {
if(i+1 == args.size()) printUsage();
numOpsRequired = Integer.parseInt(args.get(++i));
} else if(args.get(i).equals("-threads")) {
if(i+1 == args.size()) printUsage();
numThreads = Integer.parseInt(args.get(++i));
} else if(args.get(i).equals("-filesPerDir")) {
if(i+1 == args.size()) printUsage();
nrFilesPerDir = Integer.parseInt(args.get(++i));
} else if(args.get(i).equals("-close")) {
closeUponCreate = true;
} else if(!ignoreUnrelatedOptions)
printUsage();
}
nameGenerator = new FileNameGenerator(getBaseDir(), nrFilesPerDir);
}
@Override
void generateInputs(int[] opsPerThread) throws IOException {
assert opsPerThread.length == numThreads : "Error opsPerThread.length";
clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
false);
// int generatedFileIdx = 0;
LOG.info("Generate " + numOpsRequired + " intputs for " + getOpName());
fileNames = new String[numThreads][];
for(int idx=0; idx < numThreads; idx++) {
int threadOps = opsPerThread[idx];
fileNames[idx] = new String[threadOps];
for(int jdx=0; jdx < threadOps; jdx++)
fileNames[idx][jdx] = nameGenerator.
getNextFileName("ThroughputBench");
}
}
void dummyActionNoSynch(int daemonId, int fileIdx) {
for(int i=0; i < 2000; i++)
fileNames[daemonId][fileIdx].contains(""+i);
}
/**
* returns client name
*/
@Override
String getExecutionArgument(int daemonId) {
return getClientName(daemonId);
}
/**
* Do file create.
*/
@Override
long executeOp(int daemonId, int inputIdx, String clientName)
throws IOException {
long start = Time.now();
// dummyActionNoSynch(fileIdx);
clientProto.create(fileNames[daemonId][inputIdx], FsPermission.getDefault(),
clientName, new EnumSetWritable<CreateFlag>(EnumSet
.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true,
replication, BLOCK_SIZE, CryptoProtocolVersion.supported());
long end = Time.now();
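      // Only the create() call above is timed; when -close was specified the
      // file is additionally completed below, outside the measured interval.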
for(boolean written = !closeUponCreate; !written;
written = clientProto.complete(fileNames[daemonId][inputIdx],
clientName, null, HdfsConstants.GRANDFATHER_INODE_ID));
return end-start;
}
@Override
void printResults() {
LOG.info("--- " + getOpName() + " inputs ---");
LOG.info("nrFiles = " + numOpsRequired);
LOG.info("nrThreads = " + numThreads);
LOG.info("nrFilesPerDir = " + nameGenerator.getFilesPerDirectory());
printStats();
}
}
/**
* Directory creation statistics.
*
 * Each thread creates the same (±1) number of directories.
* Directory names are pre-generated during initialization.
*/
class MkdirsStats extends OperationStatsBase {
// Operation types
static final String OP_MKDIRS_NAME = "mkdirs";
static final String OP_MKDIRS_USAGE = "-op mkdirs [-threads T] [-dirs N] " +
"[-dirsPerDir P]";
protected FileNameGenerator nameGenerator;
protected String[][] dirPaths;
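    // Example invocation (argument values are illustrative only):
    //   NNThroughputBenchmark -op mkdirs -threads 16 -dirs 50000 -dirsPerDir 32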
MkdirsStats(List<String> args) {
super();
parseArguments(args);
}
@Override
String getOpName() {
return OP_MKDIRS_NAME;
}
@Override
void parseArguments(List<String> args) {
boolean ignoreUnrelatedOptions = verifyOpArgument(args);
int nrDirsPerDir = 2;
for (int i = 2; i < args.size(); i++) { // parse command line
if(args.get(i).equals("-dirs")) {
if(i+1 == args.size()) printUsage();
numOpsRequired = Integer.parseInt(args.get(++i));
} else if(args.get(i).equals("-threads")) {
if(i+1 == args.size()) printUsage();
numThreads = Integer.parseInt(args.get(++i));
} else if(args.get(i).equals("-dirsPerDir")) {
if(i+1 == args.size()) printUsage();
nrDirsPerDir = Integer.parseInt(args.get(++i));
} else if(!ignoreUnrelatedOptions)
printUsage();
}
nameGenerator = new FileNameGenerator(getBaseDir(), nrDirsPerDir);
}
@Override
void generateInputs(int[] opsPerThread) throws IOException {
assert opsPerThread.length == numThreads : "Error opsPerThread.length";
clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
false);
LOG.info("Generate " + numOpsRequired + " inputs for " + getOpName());
dirPaths = new String[numThreads][];
for(int idx=0; idx < numThreads; idx++) {
int threadOps = opsPerThread[idx];
dirPaths[idx] = new String[threadOps];
for(int jdx=0; jdx < threadOps; jdx++)
dirPaths[idx][jdx] = nameGenerator.
getNextFileName("ThroughputBench");
}
}
    /**
     * Returns the client name used as the execution argument.
     */
@Override
String getExecutionArgument(int daemonId) {
return getClientName(daemonId);
}
/**
* Do mkdirs operation.
*/
@Override
long executeOp(int daemonId, int inputIdx, String clientName)
throws IOException {
long start = Time.now();
clientProto.mkdirs(dirPaths[daemonId][inputIdx],
FsPermission.getDefault(), true);
long end = Time.now();
return end-start;
}
@Override
void printResults() {
LOG.info("--- " + getOpName() + " inputs ---");
LOG.info("nrDirs = " + numOpsRequired);
LOG.info("nrThreads = " + numThreads);
LOG.info("nrDirsPerDir = " + nameGenerator.getFilesPerDirectory());
printStats();
}
}
/**
* Open file statistics.
*
* Measure how many open calls (getBlockLocations())
* the name-node can handle per second.
*/
class OpenFileStats extends CreateFileStats {
// Operation types
static final String OP_OPEN_NAME = "open";
static final String OP_USAGE_ARGS =
" [-threads T] [-files N] [-filesPerDir P] [-useExisting]";
static final String OP_OPEN_USAGE =
"-op " + OP_OPEN_NAME + OP_USAGE_ARGS;
private boolean useExisting; // do not generate files, use existing ones
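    // Example invocation (argument values are illustrative only); -useExisting
    // reuses files created by a previous run instead of creating them first:
    //   NNThroughputBenchmark -op open -threads 16 -files 100000 -useExisting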
OpenFileStats(List<String> args) {
super(args);
}
@Override
String getOpName() {
return OP_OPEN_NAME;
}
@Override
void parseArguments(List<String> args) {
int ueIndex = args.indexOf("-useExisting");
useExisting = (ueIndex >= 0);
if(useExisting) {
args.remove(ueIndex);
}
super.parseArguments(args);
}
@Override
void generateInputs(int[] opsPerThread) throws IOException {
// create files using opsPerThread
String[] createArgs = new String[] {
"-op", "create",
"-threads", String.valueOf(this.numThreads),
"-files", String.valueOf(numOpsRequired),
"-filesPerDir",
String.valueOf(nameGenerator.getFilesPerDirectory()),
"-close"};
CreateFileStats opCreate = new CreateFileStats(Arrays.asList(createArgs));
if(!useExisting) { // create files if they were not created before
opCreate.benchmark();
LOG.info("Created " + numOpsRequired + " files.");
} else {
LOG.info("useExisting = true. Assuming "
+ numOpsRequired + " files have been created before.");
}
// use the same files for open
super.generateInputs(opsPerThread);
if(clientProto.getFileInfo(opCreate.getBaseDir()) != null
&& clientProto.getFileInfo(getBaseDir()) == null) {
clientProto.rename(opCreate.getBaseDir(), getBaseDir());
}
if(clientProto.getFileInfo(getBaseDir()) == null) {
throw new IOException(getBaseDir() + " does not exist.");
}
}
/**
* Do file open.
*/
@Override
long executeOp(int daemonId, int inputIdx, String ignore)
throws IOException {
long start = Time.now();
clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L, BLOCK_SIZE);
long end = Time.now();
return end-start;
}
}
/**
* Delete file statistics.
*
* Measure how many delete calls the name-node can handle per second.
*/
class DeleteFileStats extends OpenFileStats {
// Operation types
static final String OP_DELETE_NAME = "delete";
static final String OP_DELETE_USAGE =
"-op " + OP_DELETE_NAME + OP_USAGE_ARGS;
DeleteFileStats(List<String> args) {
super(args);
}
@Override
String getOpName() {
return OP_DELETE_NAME;
}
@Override
long executeOp(int daemonId, int inputIdx, String ignore)
throws IOException {
long start = Time.now();
clientProto.delete(fileNames[daemonId][inputIdx], false);
long end = Time.now();
return end-start;
}
}
/**
* List file status statistics.
*
* Measure how many get-file-status calls the name-node can handle per second.
*/
class FileStatusStats extends OpenFileStats {
// Operation types
static final String OP_FILE_STATUS_NAME = "fileStatus";
static final String OP_FILE_STATUS_USAGE =
"-op " + OP_FILE_STATUS_NAME + OP_USAGE_ARGS;
FileStatusStats(List<String> args) {
super(args);
}
@Override
String getOpName() {
return OP_FILE_STATUS_NAME;
}
@Override
long executeOp(int daemonId, int inputIdx, String ignore)
throws IOException {
long start = Time.now();
clientProto.getFileInfo(fileNames[daemonId][inputIdx]);
long end = Time.now();
return end-start;
}
}
/**
* Rename file statistics.
*
* Measure how many rename calls the name-node can handle per second.
*/
class RenameFileStats extends OpenFileStats {
// Operation types
static final String OP_RENAME_NAME = "rename";
static final String OP_RENAME_USAGE =
"-op " + OP_RENAME_NAME + OP_USAGE_ARGS;
protected String[][] destNames;
RenameFileStats(List<String> args) {
super(args);
}
@Override
String getOpName() {
return OP_RENAME_NAME;
}
@Override
void generateInputs(int[] opsPerThread) throws IOException {
super.generateInputs(opsPerThread);
destNames = new String[fileNames.length][];
for(int idx=0; idx < numThreads; idx++) {
int nrNames = fileNames[idx].length;
destNames[idx] = new String[nrNames];
for(int jdx=0; jdx < nrNames; jdx++)
destNames[idx][jdx] = fileNames[idx][jdx] + ".r";
}
}
@Override
long executeOp(int daemonId, int inputIdx, String ignore)
throws IOException {
long start = Time.now();
clientProto.rename(fileNames[daemonId][inputIdx],
destNames[daemonId][inputIdx]);
long end = Time.now();
return end-start;
}
}
/**
* Minimal data-node simulator.
*/
private static class TinyDatanode implements Comparable<String> {
private static final long DF_CAPACITY = 100*1024*1024;
private static final long DF_USED = 0;
NamespaceInfo nsInfo;
DatanodeRegistration dnRegistration;
DatanodeStorage storage; //only one storage
final ArrayList<BlockReportReplica> blocks;
int nrBlocks; // actual number of blocks
BlockListAsLongs blockReportList;
final int dnIdx;
private static int getNodePort(int num) throws IOException {
int port = 1 + num;
Preconditions.checkState(port < Short.MAX_VALUE);
return port;
}
    TinyDatanode(int dnIdx, int blockCapacity) throws IOException {
      this.dnIdx = dnIdx;
      // pre-fill the list with placeholder replicas so that addBlock() and
      // formBlockReport() can fill slots with set(index, ...) up to capacity
      this.blocks = new ArrayList<BlockReportReplica>(java.util.Collections
          .nCopies(blockCapacity, new BlockReportReplica(new Block())));
      this.nrBlocks = 0;
    }
@Override
public String toString() {
return dnRegistration.toString();
}
String getXferAddr() {
return dnRegistration.getXferAddr();
}
void register() throws IOException {
// get versions from the namenode
nsInfo = nameNodeProto.versionRequest();
dnRegistration = new DatanodeRegistration(
new DatanodeID(DNS.getDefaultIP("default"),
DNS.getDefaultHost("default", "default"),
DataNode.generateUuid(), getNodePort(dnIdx),
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
new DataStorage(nsInfo),
new ExportedBlockKeys(), VersionInfo.getVersion());
// register datanode
dnRegistration = dataNodeProto.registerDatanode(dnRegistration);
dnRegistration.setNamespaceInfo(nsInfo);
//first block reports
storage = new DatanodeStorage(DatanodeStorage.generateUuid());
final StorageBlockReport[] reports = {
new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
};
dataNodeProto.blockReport(dnRegistration, bpid, reports,
new BlockReportContext(1, 0, System.nanoTime(), 0L));
}
/**
* Send a heartbeat to the name-node.
* Ignore reply commands.
*/
void sendHeartbeat() throws IOException {
      // send the storage report as part of the heartbeat
// TODO:FEDERATION currently a single block pool is supported
StorageReport[] rep = { new StorageReport(storage, false,
DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration, rep,
0L, 0L, 0, 0, 0, null, true).getCommands();
if(cmds != null) {
for (DatanodeCommand cmd : cmds ) {
if(LOG.isDebugEnabled()) {
LOG.debug("sendHeartbeat Name-node reply: " + cmd.getAction());
}
}
}
}
boolean addBlock(Block blk) {
if(nrBlocks == blocks.size()) {
if(LOG.isDebugEnabled()) {
LOG.debug("Cannot add block: datanode capacity = " + blocks.size());
}
return false;
}
blocks.set(nrBlocks, new BlockReportReplica(blk));
nrBlocks++;
return true;
}
    void formBlockReport() {
      // fill remaining slots with blocks that do not exist
      for (int idx = blocks.size()-1; idx >= nrBlocks; idx--) {
        Block block = new Block(blocks.size() - idx, 0, 0);
        blocks.set(idx, new BlockReportReplica(block));
      }
      // encode the full block list; sending BlockListAsLongs.EMPTY here would
      // make every report empty and defeat the blockReport benchmark
      blockReportList = BlockListAsLongs.encode(blocks);
    }
BlockListAsLongs getBlockReportList() {
return blockReportList;
}
@Override
public int compareTo(String xferAddr) {
return getXferAddr().compareTo(xferAddr);
}
/**
* Send a heartbeat to the name-node and replicate blocks if requested.
*/
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
      // send a heartbeat and pick up any transfer commands from the reply
StorageReport[] rep = { new StorageReport(storage,
false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration,
rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
if (cmds != null) {
for (DatanodeCommand cmd : cmds) {
if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
// Send a copy of a block to another datanode
BlockCommand bcmd = (BlockCommand)cmd;
return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
bcmd.getTargetStorageIDs());
}
}
}
return 0;
}
/**
* Transfer blocks to another data-node.
* Just report on behalf of the other data-node
* that the blocks have been received.
*/
private int transferBlocks( Block blocks[],
DatanodeInfo xferTargets[][],
String targetStorageIDs[][]
) throws IOException {
for(int i = 0; i < blocks.length; i++) {
DatanodeInfo blockTargets[] = xferTargets[i];
for(int t = 0; t < blockTargets.length; t++) {
DatanodeInfo dnInfo = blockTargets[t];
String targetStorageID = targetStorageIDs[i][t];
DatanodeRegistration receivedDNReg;
receivedDNReg = new DatanodeRegistration(dnInfo,
new DataStorage(nsInfo),
new ExportedBlockKeys(), VersionInfo.getVersion());
ReceivedDeletedBlockInfo[] rdBlocks = {
new ReceivedDeletedBlockInfo(
blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
null) };
StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
targetStorageID, rdBlocks) };
dataNodeProto.blockReceivedAndDeleted(receivedDNReg, bpid, report);
}
}
return blocks.length;
}
}
/**
* Block report statistics.
*
* Each thread here represents its own data-node.
* Data-nodes send the same block report each time.
* The block report may contain missing or non-existing blocks.
*/
class BlockReportStats extends OperationStatsBase {
static final String OP_BLOCK_REPORT_NAME = "blockReport";
static final String OP_BLOCK_REPORT_USAGE =
"-op blockReport [-datanodes T] [-reports N] " +
"[-blocksPerReport B] [-blocksPerFile F]";
private int blocksPerReport;
private int blocksPerFile;
private TinyDatanode[] datanodes; // array of data-nodes sorted by name
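    // Example invocation (argument values are illustrative only):
    //   NNThroughputBenchmark -op blockReport -datanodes 10 -reports 30
    //       -blocksPerReport 1000 -blocksPerFile 10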
BlockReportStats(List<String> args) {
super();
this.blocksPerReport = 100;
this.blocksPerFile = 10;
      // set heartbeat interval to 3 min, so that the datanode expiration
      // interval becomes 40 min (2 * recheck interval + 10 * heartbeat interval)
config.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 3 * 60);
parseArguments(args);
// adjust replication to the number of data-nodes
this.replication = (short)Math.min(replication, getNumDatanodes());
}
/**
     * Each thread pretends it is a data-node here.
*/
private int getNumDatanodes() {
return numThreads;
}
@Override
String getOpName() {
return OP_BLOCK_REPORT_NAME;
}
@Override
void parseArguments(List<String> args) {
boolean ignoreUnrelatedOptions = verifyOpArgument(args);
for (int i = 2; i < args.size(); i++) { // parse command line
if(args.get(i).equals("-reports")) {
if(i+1 == args.size()) printUsage();
numOpsRequired = Integer.parseInt(args.get(++i));
} else if(args.get(i).equals("-datanodes")) {
if(i+1 == args.size()) printUsage();
numThreads = Integer.parseInt(args.get(++i));
} else if(args.get(i).equals("-blocksPerReport")) {
if(i+1 == args.size()) printUsage();
blocksPerReport = Integer.parseInt(args.get(++i));
} else if(args.get(i).equals("-blocksPerFile")) {
if(i+1 == args.size()) printUsage();
blocksPerFile = Integer.parseInt(args.get(++i));
} else if(!ignoreUnrelatedOptions)
printUsage();
}
}
@Override
void generateInputs(int[] ignore) throws IOException {
int nrDatanodes = getNumDatanodes();
int nrBlocks = (int)Math.ceil((double)blocksPerReport * nrDatanodes
/ replication);
int nrFiles = (int)Math.ceil((double)nrBlocks / blocksPerFile);
datanodes = new TinyDatanode[nrDatanodes];
// create data-nodes
String prevDNName = "";
for(int idx=0; idx < nrDatanodes; idx++) {
datanodes[idx] = new TinyDatanode(idx, blocksPerReport);
datanodes[idx].register();
assert datanodes[idx].getXferAddr().compareTo(prevDNName) > 0
: "Data-nodes must be sorted lexicographically.";
datanodes[idx].sendHeartbeat();
prevDNName = datanodes[idx].getXferAddr();
}
// create files
LOG.info("Creating " + nrFiles + " files with " + blocksPerFile + " blocks each.");
FileNameGenerator nameGenerator;
nameGenerator = new FileNameGenerator(getBaseDir(), 100);
String clientName = getClientName(007);
clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
false);
for(int idx=0; idx < nrFiles; idx++) {
String fileName = nameGenerator.getNextFileName("ThroughputBench");
clientProto.create(fileName, FsPermission.getDefault(), clientName,
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication,
BLOCK_SIZE, CryptoProtocolVersion.supported());
ExtendedBlock lastBlock = addBlocks(fileName, clientName);
clientProto.complete(fileName, clientName, lastBlock, HdfsConstants.GRANDFATHER_INODE_ID);
}
// prepare block reports
for(int idx=0; idx < nrDatanodes; idx++) {
datanodes[idx].formBlockReport();
}
}
private ExtendedBlock addBlocks(String fileName, String clientName)
throws IOException {
ExtendedBlock prevBlock = null;
for(int jdx = 0; jdx < blocksPerFile; jdx++) {
LocatedBlock loc = clientProto.addBlock(fileName, clientName,
prevBlock, null, HdfsConstants.GRANDFATHER_INODE_ID, null);
prevBlock = loc.getBlock();
for(DatanodeInfo dnInfo : loc.getLocations()) {
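        // datanodes[] is sorted by transfer address (asserted in
        // generateInputs), so a binary search on xferAddr locates the
        // TinyDatanode that should record this replica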
int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());
datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
loc.getBlock().getLocalBlock(),
ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
datanodes[dnIdx].storage.getStorageID(), rdBlocks) };
dataNodeProto.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration,
bpid, report);
}
}
return prevBlock;
}
    /**
     * The execution argument is not used by this operation.
     */
@Override
String getExecutionArgument(int daemonId) {
return null;
}
@Override
long executeOp(int daemonId, int inputIdx, String ignore) throws IOException {
assert daemonId < numThreads : "Wrong daemonId.";
TinyDatanode dn = datanodes[daemonId];
long start = Time.now();
StorageBlockReport[] report = { new StorageBlockReport(
dn.storage, dn.getBlockReportList()) };
dataNodeProto.blockReport(dn.dnRegistration, bpid, report,
new BlockReportContext(1, 0, System.nanoTime(), 0L));
long end = Time.now();
return end-start;
}
@Override
void printResults() {
String blockDistribution = "";
String delim = "(";
for(int idx=0; idx < getNumDatanodes(); idx++) {
blockDistribution += delim + datanodes[idx].nrBlocks;
delim = ", ";
}
blockDistribution += ")";
LOG.info("--- " + getOpName() + " inputs ---");
LOG.info("reports = " + numOpsRequired);
LOG.info("datanodes = " + numThreads + " " + blockDistribution);
LOG.info("blocksPerReport = " + blocksPerReport);
LOG.info("blocksPerFile = " + blocksPerFile);
printStats();
}
} // end BlockReportStats
/**
* Measures how fast replication monitor can compute data-node work.
*
* It runs only one thread until no more work can be scheduled.
*/
class ReplicationStats extends OperationStatsBase {
static final String OP_REPLICATION_NAME = "replication";
static final String OP_REPLICATION_USAGE =
"-op replication [-datanodes T] [-nodesToDecommission D] " +
"[-nodeReplicationLimit C] [-totalBlocks B] [-replication R]";
private final BlockReportStats blockReportObject;
private int numDatanodes;
private int nodesToDecommission;
private int nodeReplicationLimit;
private int totalBlocks;
private int numDecommissionedBlocks;
private int numPendingBlocks;
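    // Example invocation (argument values are illustrative only):
    //   NNThroughputBenchmark -op replication -datanodes 10 -nodesToDecommission 2
    //       -totalBlocks 10000 -replication 3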
ReplicationStats(List<String> args) {
super();
numThreads = 1;
numDatanodes = 3;
nodesToDecommission = 1;
nodeReplicationLimit = 100;
totalBlocks = 100;
parseArguments(args);
// number of operations is 4 times the number of decommissioned
// blocks divided by the number of needed replications scanned
// by the replication monitor in one iteration
numOpsRequired = (totalBlocks*replication*nodesToDecommission*2)
/ (numDatanodes*numDatanodes);
String[] blkReportArgs = {
"-op", "blockReport",
"-datanodes", String.valueOf(numDatanodes),
"-blocksPerReport", String.valueOf(totalBlocks*replication/numDatanodes),
"-blocksPerFile", String.valueOf(numDatanodes)};
blockReportObject = new BlockReportStats(Arrays.asList(blkReportArgs));
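      // The nested BlockReportStats is used only to register the simulated
      // datanodes and populate the namespace in generateInputs(); its own
      // benchmark driver is never started from here.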
numDecommissionedBlocks = 0;
numPendingBlocks = 0;
}
@Override
String getOpName() {
return OP_REPLICATION_NAME;
}
@Override
void parseArguments(List<String> args) {
boolean ignoreUnrelatedOptions = verifyOpArgument(args);
for (int i = 2; i < args.size(); i++) { // parse command line
if(args.get(i).equals("-datanodes")) {
if(i+1 == args.size()) printUsage();
numDatanodes = Integer.parseInt(args.get(++i));
} else if(args.get(i).equals("-nodesToDecommission")) {
if(i+1 == args.size()) printUsage();
nodesToDecommission = Integer.parseInt(args.get(++i));
} else if(args.get(i).equals("-nodeReplicationLimit")) {
if(i+1 == args.size()) printUsage();
nodeReplicationLimit = Integer.parseInt(args.get(++i));
} else if(args.get(i).equals("-totalBlocks")) {
if(i+1 == args.size()) printUsage();
totalBlocks = Integer.parseInt(args.get(++i));
} else if(args.get(i).equals("-replication")) {
if(i+1 == args.size()) printUsage();
replication = Short.parseShort(args.get(++i));
} else if(!ignoreUnrelatedOptions)
printUsage();
}
}
@Override
void generateInputs(int[] ignore) throws IOException {
final FSNamesystem namesystem = nameNode.getNamesystem();
// start data-nodes; create a bunch of files; generate block reports.
blockReportObject.generateInputs(ignore);
// stop replication monitor
BlockManagerTestUtil.stopReplicationThread(namesystem.getBlockManager());
// report blocks once
int nrDatanodes = blockReportObject.getNumDatanodes();
for(int idx=0; idx < nrDatanodes; idx++) {
blockReportObject.executeOp(idx, 0, null);
}
// decommission data-nodes
decommissionNodes();
// set node replication limit
BlockManagerTestUtil.setNodeReplicationLimit(namesystem.getBlockManager(),
nodeReplicationLimit);
}
private void decommissionNodes() throws IOException {
String excludeFN = config.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, "exclude");
FileOutputStream excludeFile = new FileOutputStream(excludeFN);
excludeFile.getChannel().truncate(0L);
int nrDatanodes = blockReportObject.getNumDatanodes();
numDecommissionedBlocks = 0;
for(int i=0; i < nodesToDecommission; i++) {
TinyDatanode dn = blockReportObject.datanodes[nrDatanodes-1-i];
numDecommissionedBlocks += dn.nrBlocks;
excludeFile.write(dn.getXferAddr().getBytes());
excludeFile.write('\n');
LOG.info("Datanode " + dn + " is decommissioned.");
}
excludeFile.close();
clientProto.refreshNodes();
}
    /**
     * The execution argument is not used by this operation.
     */
@Override
String getExecutionArgument(int daemonId) {
return null;
}
@Override
long executeOp(int daemonId, int inputIdx, String ignore) throws IOException {
assert daemonId < numThreads : "Wrong daemonId.";
long start = Time.now();
// compute data-node work
int work = BlockManagerTestUtil.getComputedDatanodeWork(
nameNode.getNamesystem().getBlockManager());
long end = Time.now();
numPendingBlocks += work;
if(work == 0)
daemons.get(daemonId).terminate();
return end-start;
}
@Override
void printResults() {
String blockDistribution = "";
String delim = "(";
for(int idx=0; idx < blockReportObject.getNumDatanodes(); idx++) {
blockDistribution += delim + blockReportObject.datanodes[idx].nrBlocks;
delim = ", ";
}
blockDistribution += ")";
LOG.info("--- " + getOpName() + " inputs ---");
LOG.info("numOpsRequired = " + numOpsRequired);
LOG.info("datanodes = " + numDatanodes + " " + blockDistribution);
LOG.info("decommissioned datanodes = " + nodesToDecommission);
LOG.info("datanode replication limit = " + nodeReplicationLimit);
LOG.info("total blocks = " + totalBlocks);
printStats();
LOG.info("decommissioned blocks = " + numDecommissionedBlocks);
LOG.info("pending replications = " + numPendingBlocks);
LOG.info("replications per sec: " + getBlocksPerSecond());
}
private double getBlocksPerSecond() {
return elapsedTime == 0 ? 0 : 1000*(double)numPendingBlocks / elapsedTime;
}
} // end ReplicationStats
static void printUsage() {
System.err.println("Usage: NNThroughputBenchmark"
+ "\n\t" + OperationStatsBase.OP_ALL_USAGE
+ " | \n\t" + CreateFileStats.OP_CREATE_USAGE
+ " | \n\t" + MkdirsStats.OP_MKDIRS_USAGE
+ " | \n\t" + OpenFileStats.OP_OPEN_USAGE
+ " | \n\t" + DeleteFileStats.OP_DELETE_USAGE
+ " | \n\t" + FileStatusStats.OP_FILE_STATUS_USAGE
+ " | \n\t" + RenameFileStats.OP_RENAME_USAGE
+ " | \n\t" + BlockReportStats.OP_BLOCK_REPORT_USAGE
+ " | \n\t" + ReplicationStats.OP_REPLICATION_USAGE
+ " | \n\t" + CleanAllStats.OP_CLEAN_USAGE
+ " | \n\t" + GENERAL_OPTIONS_USAGE
);
System.exit(-1);
}
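  /**
   * Programmatic entry point, e.g. from tests (argument values below are
   * illustrative only):
   * {@code runBenchmark(new HdfsConfiguration(),
   *     Arrays.asList("-op", "fileStatus", "-threads", "4", "-files", "1000"))}
   */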
public static void runBenchmark(Configuration conf, List<String> args)
throws Exception {
NNThroughputBenchmark bench = null;
try {
bench = new NNThroughputBenchmark(conf);
bench.run(args.toArray(new String[]{}));
} finally {
if(bench != null)
bench.close();
}
}
/**
* Main method of the benchmark.
* @param aArgs command line parameters
*/
@Override // Tool
public int run(String[] aArgs) throws Exception {
List<String> args = new ArrayList<String>(Arrays.asList(aArgs));
if(args.size() < 2 || ! args.get(0).startsWith("-op"))
printUsage();
String type = args.get(1);
boolean runAll = OperationStatsBase.OP_ALL_NAME.equals(type);
// Start the NameNode
String[] argv = new String[] {};
List<OperationStatsBase> ops = new ArrayList<OperationStatsBase>();
OperationStatsBase opStat = null;
try {
if(runAll || CreateFileStats.OP_CREATE_NAME.equals(type)) {
opStat = new CreateFileStats(args);
ops.add(opStat);
}
if(runAll || MkdirsStats.OP_MKDIRS_NAME.equals(type)) {
opStat = new MkdirsStats(args);
ops.add(opStat);
}
if(runAll || OpenFileStats.OP_OPEN_NAME.equals(type)) {
opStat = new OpenFileStats(args);
ops.add(opStat);
}
if(runAll || DeleteFileStats.OP_DELETE_NAME.equals(type)) {
opStat = new DeleteFileStats(args);
ops.add(opStat);
}
if(runAll || FileStatusStats.OP_FILE_STATUS_NAME.equals(type)) {
opStat = new FileStatusStats(args);
ops.add(opStat);
}
if(runAll || RenameFileStats.OP_RENAME_NAME.equals(type)) {
opStat = new RenameFileStats(args);
ops.add(opStat);
}
if(runAll || BlockReportStats.OP_BLOCK_REPORT_NAME.equals(type)) {
opStat = new BlockReportStats(args);
ops.add(opStat);
}
if(runAll || ReplicationStats.OP_REPLICATION_NAME.equals(type)) {
opStat = new ReplicationStats(args);
ops.add(opStat);
}
if(runAll || CleanAllStats.OP_CLEAN_NAME.equals(type)) {
opStat = new CleanAllStats(args);
ops.add(opStat);
}
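      // Two modes: without a remote name-node URI an in-process NameNode is
      // started and its RPC server backs all protocol handles; otherwise the
      // client, datanode and refresh protocols are proxied to the remote
      // name-node.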
if (namenodeUri == null) {
nameNode = NameNode.createNameNode(argv, config);
NamenodeProtocols nnProtos = nameNode.getRpcServer();
nameNodeProto = nnProtos;
clientProto = nnProtos;
dataNodeProto = nnProtos;
refreshUserMappingsProto = nnProtos;
bpid = nameNode.getNamesystem().getBlockPoolId();
} else {
FileSystem.setDefaultUri(getConf(), namenodeUri);
DistributedFileSystem dfs = (DistributedFileSystem)
FileSystem.get(getConf());
final URI nnUri = new URI(namenodeUri);
nameNodeProto = DFSTestUtil.getNamenodeProtocolProxy(config, nnUri,
UserGroupInformation.getCurrentUser());
clientProto = dfs.getClient().getNamenode();
dataNodeProto = new DatanodeProtocolClientSideTranslatorPB(
NameNode.getAddress(nnUri), config);
refreshUserMappingsProto =
DFSTestUtil.getRefreshUserMappingsProtocolProxy(config, nnUri);
getBlockPoolId(dfs);
}
if(ops.size() == 0)
printUsage();
// run each benchmark
for(OperationStatsBase op : ops) {
LOG.info("Starting benchmark: " + op.getOpName());
op.benchmark();
op.cleanUp();
}
// print statistics
for(OperationStatsBase op : ops) {
LOG.info("");
op.printResults();
}
} catch(Exception e) {
LOG.error(StringUtils.stringifyException(e));
throw e;
}
return 0;
}
private void getBlockPoolId(DistributedFileSystem unused)
throws IOException {
final NamespaceInfo nsInfo = nameNodeProto.versionRequest();
bpid = nsInfo.getBlockPoolID();
}
public static void main(String[] args) throws Exception {
NNThroughputBenchmark bench = null;
try {
bench = new NNThroughputBenchmark(new HdfsConfiguration());
ToolRunner.run(bench, args);
} finally {
if(bench != null)
bench.close();
}
}
@Override // Configurable
public void setConf(Configuration conf) {
config = conf;
}
@Override // Configurable
public Configuration getConf() {
return config;
}
}