hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.web.resources.DelegationParam;
import org.apache.hadoop.hdfs.web.resources.DoAsParam;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.hdfs.web.resources.FsActionParam;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
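/**
 * Tests how {@link WebHdfsFileSystem} builds request URLs: percent-encoded
 * paths, simple and proxy-user query parameters, and delegation-token
 * parameters under a faked Kerberos configuration.
 */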
public class TestWebHdfsUrl {
// NOTE: port is never used
final URI uri = URI.create(WebHdfsConstants.WEBHDFS_SCHEME + "://" + "127.0.0.1:0");
@Before
public void resetUGI() {
UserGroupInformation.setConfiguration(new Configuration());
}
@Test(timeout=60000)
public void testEncodedPathUrl() throws IOException, URISyntaxException {
Configuration conf = new Configuration();
final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) FileSystem.get(
uri, conf);
// Construct a file path that contains a percent-encoded string
String pathName = "/hdtest010%2C60020%2C1371000602151.1371058984668";
Path fsPath = new Path(pathName);
URL encodedPathUrl = webhdfs.toUrl(PutOpParam.Op.CREATE, fsPath);
// We should get back the original file path after the encode/decode round trip
Assert.assertEquals(WebHdfsFileSystem.PATH_PREFIX + pathName,
encodedPathUrl.toURI().getPath());
}
@Test(timeout=60000)
public void testSimpleAuthParamsInUrl() throws IOException {
Configuration conf = new Configuration();
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser("test-user");
UserGroupInformation.setLoginUser(ugi);
WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf);
Path fsPath = new Path("/");
// send user+token
URL fileStatusUrl = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, fsPath);
checkQueryParams(
new String[]{
GetOpParam.Op.GETFILESTATUS.toQueryString(),
new UserParam(ugi.getShortUserName()).toString()
},
fileStatusUrl);
}
@Test(timeout=60000)
public void testSimpleProxyAuthParamsInUrl() throws IOException {
Configuration conf = new Configuration();
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser("test-user");
ugi = UserGroupInformation.createProxyUser("test-proxy-user", ugi);
UserGroupInformation.setLoginUser(ugi);
WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf);
Path fsPath = new Path("/");
// send real+effective
URL fileStatusUrl = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, fsPath);
checkQueryParams(
new String[]{
GetOpParam.Op.GETFILESTATUS.toQueryString(),
new UserParam(ugi.getRealUser().getShortUserName()).toString(),
new DoAsParam(ugi.getShortUserName()).toString()
},
fileStatusUrl);
}
@Test(timeout=60000)
public void testSecureAuthParamsInUrl() throws IOException {
Configuration conf = new Configuration();
// fake turning on security so the API thinks it should use tokens
SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
UserGroupInformation.setConfiguration(conf);
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser("test-user");
ugi.setAuthenticationMethod(KERBEROS);
UserGroupInformation.setLoginUser(ugi);
WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf);
Path fsPath = new Path("/");
String tokenString = webhdfs.getDelegationToken().encodeToUrlString();
// send user
URL getTokenUrl = webhdfs.toUrl(GetOpParam.Op.GETDELEGATIONTOKEN, fsPath);
checkQueryParams(
new String[]{
GetOpParam.Op.GETDELEGATIONTOKEN.toQueryString(),
new UserParam(ugi.getShortUserName()).toString()
},
getTokenUrl);
// send user
URL renewTokenUrl = webhdfs.toUrl(PutOpParam.Op.RENEWDELEGATIONTOKEN,
fsPath, new TokenArgumentParam(tokenString));
checkQueryParams(
new String[]{
PutOpParam.Op.RENEWDELEGATIONTOKEN.toQueryString(),
new UserParam(ugi.getShortUserName()).toString(),
new TokenArgumentParam(tokenString).toString(),
},
renewTokenUrl);
// send token
URL cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN,
fsPath, new TokenArgumentParam(tokenString));
checkQueryParams(
new String[]{
PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(),
new UserParam(ugi.getShortUserName()).toString(),
new TokenArgumentParam(tokenString).toString(),
},
cancelTokenUrl);
// send token
URL fileStatusUrl = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, fsPath);
checkQueryParams(
new String[]{
GetOpParam.Op.GETFILESTATUS.toQueryString(),
new DelegationParam(tokenString).toString()
},
fileStatusUrl);
// wipe out internal token to simulate auth always required
webhdfs.setDelegationToken(null);
// send user
cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN,
fsPath, new TokenArgumentParam(tokenString));
checkQueryParams(
new String[]{
PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(),
new UserParam(ugi.getShortUserName()).toString(),
new TokenArgumentParam(tokenString).toString(),
},
cancelTokenUrl);
// send user
fileStatusUrl = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, fsPath);
checkQueryParams(
new String[]{
GetOpParam.Op.GETFILESTATUS.toQueryString(),
new UserParam(ugi.getShortUserName()).toString()
},
fileStatusUrl);
}
@Test(timeout=60000)
public void testSecureProxyAuthParamsInUrl() throws IOException {
Configuration conf = new Configuration();
// fake turning on security so the API thinks it should use tokens
SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
UserGroupInformation.setConfiguration(conf);
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser("test-user");
ugi.setAuthenticationMethod(KERBEROS);
ugi = UserGroupInformation.createProxyUser("test-proxy-user", ugi);
UserGroupInformation.setLoginUser(ugi);
WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf);
Path fsPath = new Path("/");
String tokenString = webhdfs.getDelegationToken().encodeToUrlString();
// send real+effective
URL getTokenUrl = webhdfs.toUrl(GetOpParam.Op.GETDELEGATIONTOKEN, fsPath);
checkQueryParams(
new String[]{
GetOpParam.Op.GETDELEGATIONTOKEN.toQueryString(),
new UserParam(ugi.getRealUser().getShortUserName()).toString(),
new DoAsParam(ugi.getShortUserName()).toString()
},
getTokenUrl);
// send real+effective
URL renewTokenUrl = webhdfs.toUrl(PutOpParam.Op.RENEWDELEGATIONTOKEN,
fsPath, new TokenArgumentParam(tokenString));
checkQueryParams(
new String[]{
PutOpParam.Op.RENEWDELEGATIONTOKEN.toQueryString(),
new UserParam(ugi.getRealUser().getShortUserName()).toString(),
new DoAsParam(ugi.getShortUserName()).toString(),
new TokenArgumentParam(tokenString).toString(),
},
renewTokenUrl);
// send token
URL cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN,
fsPath, new TokenArgumentParam(tokenString));
checkQueryParams(
new String[]{
PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(),
new UserParam(ugi.getRealUser().getShortUserName()).toString(),
new DoAsParam(ugi.getShortUserName()).toString(),
new TokenArgumentParam(tokenString).toString(),
},
cancelTokenUrl);
// send token
URL fileStatusUrl = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, fsPath);
checkQueryParams(
new String[]{
GetOpParam.Op.GETFILESTATUS.toQueryString(),
new DelegationParam(tokenString).toString()
},
fileStatusUrl);
// wipe out internal token to simulate auth always required
webhdfs.setDelegationToken(null);
// send real+effective
cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN,
fsPath, new TokenArgumentParam(tokenString));
checkQueryParams(
new String[]{
PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(),
new UserParam(ugi.getRealUser().getShortUserName()).toString(),
new DoAsParam(ugi.getShortUserName()).toString(),
new TokenArgumentParam(tokenString).toString()
},
cancelTokenUrl);
// send real+effective
fileStatusUrl = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, fsPath);
checkQueryParams(
new String[]{
GetOpParam.Op.GETFILESTATUS.toQueryString(),
new UserParam(ugi.getRealUser().getShortUserName()).toString(),
new DoAsParam(ugi.getShortUserName()).toString()
},
fileStatusUrl);
}
@Test(timeout=60000)
public void testCheckAccessUrl() throws IOException {
Configuration conf = new Configuration();
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser("test-user");
UserGroupInformation.setLoginUser(ugi);
WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf);
Path fsPath = new Path("/p1");
URL checkAccessUrl = webhdfs.toUrl(GetOpParam.Op.CHECKACCESS,
fsPath, new FsActionParam(FsAction.READ_WRITE));
checkQueryParams(
new String[]{
GetOpParam.Op.CHECKACCESS.toQueryString(),
new UserParam(ugi.getShortUserName()).toString(),
FsActionParam.NAME + "=" + FsAction.READ_WRITE.SYMBOL
},
checkAccessUrl);
}
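// Asserts that the URL's query string contains exactly the expected
// parameters, comparing order-insensitively.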
private void checkQueryParams(String[] expected, URL url) {
Arrays.sort(expected);
String[] query = url.getQuery().split("&");
Arrays.sort(query);
assertEquals(Arrays.toString(expected), Arrays.toString(query));
}
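// Builds a WebHdfsFileSystem for the test URI; when security is enabled,
// it first mints a webhdfs delegation token against a mocked namesystem
// and adds it to the caller's UGI.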
private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi,
Configuration conf) throws IOException {
if (UserGroupInformation.isSecurityEnabled()) {
DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(
ugi.getUserName()), null, null);
FSNamesystem namesystem = mock(FSNamesystem.class);
DelegationTokenSecretManager dtSecretManager = new DelegationTokenSecretManager(
86400000, 86400000, 86400000, 86400000, namesystem);
dtSecretManager.startThreads();
Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
dtId, dtSecretManager);
SecurityUtil.setTokenService(
token, NetUtils.createSocketAddr(uri.getAuthority()));
token.setKind(WebHdfsConstants.WEBHDFS_TOKEN_KIND);
ugi.addToken(token);
}
return (WebHdfsFileSystem) FileSystem.get(uri, conf);
}
}
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.util.Time;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.ObjectReader;
import org.junit.Assert;
import org.junit.Test;
import com.google.common.collect.Lists;
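/**
 * Tests JSON serialization and parsing in JsonUtil/JsonUtilClient for file
 * status, datanode info, ACL status, and extended attributes.
 */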
public class TestJsonUtil {
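// Converts an HdfsFileStatus to a FileStatus so that a status round-tripped
// through JSON can be compared for equality.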
static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
f.getPermission(), f.getOwner(), f.getGroup(),
f.isSymlink() ? new Path(f.getSymlink()) : null,
new Path(f.getFullName(parent)));
}
@Test
public void testHdfsFileStatus() throws IOException {
final long now = Time.now();
final String parent = "/dir";
final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
now, now + 10, new FsPermission((short) 0644), "user", "group",
DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
final FileStatus fstatus = toFileStatus(status, parent);
System.out.println("status = " + status);
System.out.println("fstatus = " + fstatus);
final String json = JsonUtil.toJsonString(status, true);
System.out.println("json = " + json.replace(",", ",\n "));
ObjectReader reader = new ObjectMapper().reader(Map.class);
final HdfsFileStatus s2 =
JsonUtilClient.toFileStatus((Map<?, ?>) reader.readValue(json), true);
final FileStatus fs2 = toFileStatus(s2, parent);
System.out.println("s2 = " + s2);
System.out.println("fs2 = " + fs2);
Assert.assertEquals(fstatus, fs2);
}
@Test
public void testToDatanodeInfoWithoutSecurePort() throws Exception {
Map<String, Object> response = new HashMap<String, Object>();
response.put("ipAddr", "127.0.0.1");
response.put("hostName", "localhost");
response.put("storageID", "fake-id");
response.put("xferPort", 1337l);
response.put("infoPort", 1338l);
// deliberately don't include an entry for "infoSecurePort"
response.put("ipcPort", 1339l);
response.put("capacity", 1024l);
response.put("dfsUsed", 512l);
response.put("remaining", 512l);
response.put("blockPoolUsed", 512l);
response.put("lastUpdate", 0l);
response.put("xceiverCount", 4096l);
response.put("networkLocation", "foo.bar.baz");
response.put("adminState", "NORMAL");
response.put("cacheCapacity", 123l);
response.put("cacheUsed", 321l);
JsonUtilClient.toDatanodeInfo(response);
}
@Test
public void testToDatanodeInfoWithName() throws Exception {
Map<String, Object> response = new HashMap<String, Object>();
// Older servers (1.x, 0.23, etc.) send 'name' instead of ipAddr
// and xferPort.
String name = "127.0.0.1:1004";
response.put("name", name);
response.put("hostName", "localhost");
response.put("storageID", "fake-id");
response.put("infoPort", 1338l);
response.put("ipcPort", 1339l);
response.put("capacity", 1024l);
response.put("dfsUsed", 512l);
response.put("remaining", 512l);
response.put("blockPoolUsed", 512l);
response.put("lastUpdate", 0l);
response.put("xceiverCount", 4096l);
response.put("networkLocation", "foo.bar.baz");
response.put("adminState", "NORMAL");
response.put("cacheCapacity", 123l);
response.put("cacheUsed", 321l);
DatanodeInfo di = JsonUtilClient.toDatanodeInfo(response);
Assert.assertEquals(name, di.getXferAddr());
// The encoded result should contain name, ipAddr and xferPort.
Map<String, Object> r = JsonUtil.toJsonMap(di);
Assert.assertEquals(name, r.get("name"));
Assert.assertEquals("127.0.0.1", r.get("ipAddr"));
// In this test, it is Integer instead of Long since json was not actually
// involved in constructing the map.
Assert.assertEquals(1004, (int)(Integer)r.get("xferPort"));
// Invalid names
String[] badNames = {"127.0.0.1", "127.0.0.1:", ":", "127.0.0.1:sweet", ":123"};
for (String badName : badNames) {
response.put("name", badName);
checkDecodeFailure(response);
}
// Missing both name and ipAddr
response.remove("name");
checkDecodeFailure(response);
// Only missing xferPort
response.put("ipAddr", "127.0.0.1");
checkDecodeFailure(response);
}
@Test
public void testToAclStatus() throws IOException {
String jsonString =
"{\"AclStatus\":{\"entries\":[\"user::rwx\",\"user:user1:rw-\",\"group::rw-\",\"other::r-x\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}";
ObjectReader reader = new ObjectMapper().reader(Map.class);
Map<?, ?> json = reader.readValue(jsonString);
List<AclEntry> aclSpec =
Lists.newArrayList(aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "user1", READ_WRITE),
aclEntry(ACCESS, GROUP, READ_WRITE),
aclEntry(ACCESS, OTHER, READ_EXECUTE));
AclStatus.Builder aclStatusBuilder = new AclStatus.Builder();
aclStatusBuilder.owner("testuser");
aclStatusBuilder.group("supergroup");
aclStatusBuilder.addEntries(aclSpec);
aclStatusBuilder.stickyBit(false);
Assert.assertEquals("Should be equal", aclStatusBuilder.build(),
JsonUtilClient.toAclStatus(json));
}
@Test
public void testToJsonFromAclStatus() {
String jsonString =
"{\"AclStatus\":{\"entries\":[\"user:user1:rwx\",\"group::rw-\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}";
AclStatus.Builder aclStatusBuilder = new AclStatus.Builder();
aclStatusBuilder.owner("testuser");
aclStatusBuilder.group("supergroup");
aclStatusBuilder.stickyBit(false);
List<AclEntry> aclSpec =
Lists.newArrayList(aclEntry(ACCESS, USER, "user1", ALL),
aclEntry(ACCESS, GROUP, READ_WRITE));
aclStatusBuilder.addEntries(aclSpec);
Assert.assertEquals(jsonString,
JsonUtil.toJsonString(aclStatusBuilder.build()));
}
@Test
public void testToJsonFromXAttrs() throws IOException {
String jsonString =
"{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," +
"{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}";
XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
setName("a1").setValue(XAttrCodec.decodeValue("0x313233")).build();
XAttr xAttr2 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
setName("a2").setValue(XAttrCodec.decodeValue("0x313131")).build();
List<XAttr> xAttrs = Lists.newArrayList();
xAttrs.add(xAttr1);
xAttrs.add(xAttr2);
Assert.assertEquals(jsonString, JsonUtil.toJsonString(xAttrs,
XAttrCodec.HEX));
}
@Test
public void testToXAttrMap() throws IOException {
String jsonString =
"{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," +
"{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}";
ObjectReader reader = new ObjectMapper().reader(Map.class);
Map<?, ?> json = reader.readValue(jsonString);
XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
setName("a1").setValue(XAttrCodec.decodeValue("0x313233")).build();
XAttr xAttr2 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
setName("a2").setValue(XAttrCodec.decodeValue("0x313131")).build();
List<XAttr> xAttrs = Lists.newArrayList();
xAttrs.add(xAttr1);
xAttrs.add(xAttr2);
Map<String, byte[]> xAttrMap = XAttrHelper.buildXAttrMap(xAttrs);
Map<String, byte[]> parsedXAttrMap = JsonUtilClient.toXAttrs(json);
Assert.assertEquals(xAttrMap.size(), parsedXAttrMap.size());
Iterator<Entry<String, byte[]>> iter = xAttrMap.entrySet().iterator();
while(iter.hasNext()) {
Entry<String, byte[]> entry = iter.next();
Assert.assertArrayEquals(entry.getValue(),
parsedXAttrMap.get(entry.getKey()));
}
}
@Test
public void testGetXAttrFromJson() throws IOException {
String jsonString =
"{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," +
"{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}";
ObjectReader reader = new ObjectMapper().reader(Map.class);
Map<?, ?> json = reader.readValue(jsonString);
// Get xattr: user.a2
byte[] value = JsonUtilClient.getXAttr(json, "user.a2");
Assert.assertArrayEquals(XAttrCodec.decodeValue("0x313131"), value);
}
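// Asserts that JsonUtilClient.toDatanodeInfo rejects the given malformed map.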
private void checkDecodeFailure(Map<String, Object> map) {
try {
JsonUtilClient.toDatanodeInfo(map);
Assert.fail("Exception not thrown against bad input.");
} catch (Exception e) {
// expected
}
}
}
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.Token;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
import org.mortbay.util.ajax.JSON;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.Response;
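/**
 * Tests WebHDFS client behavior against an HA MiniDFSCluster: failover,
 * delegation tokens across failover, stale credentials on a standby
 * NameNode, and retries while a NameNode is starting up.
 */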
public class TestWebHDFSForHA {
private static final String LOGICAL_NAME = "minidfs";
private static final URI WEBHDFS_URI = URI.create(WebHdfsConstants.WEBHDFS_SCHEME +
"://" + LOGICAL_NAME);
private static final MiniDFSNNTopology topo = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf(LOGICAL_NAME).addNN(
new MiniDFSNNTopology.NNConf("nn1")).addNN(
new MiniDFSNNTopology.NNConf("nn2")));
@Test
public void testHA() throws IOException {
Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
.numDataNodes(0).build();
HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
cluster.waitActive();
fs = FileSystem.get(WEBHDFS_URI, conf);
cluster.transitionToActive(0);
final Path dir = new Path("/test");
Assert.assertTrue(fs.mkdirs(dir));
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
final Path dir2 = new Path("/test2");
Assert.assertTrue(fs.mkdirs(dir2));
} finally {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testSecureHAToken() throws IOException, InterruptedException {
Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
conf.setBoolean(DFSConfigKeys
.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
MiniDFSCluster cluster = null;
WebHdfsFileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
.numDataNodes(0).build();
HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
cluster.waitActive();
fs = spy((WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf));
FileSystemTestHelper.addFileSystemForTesting(WEBHDFS_URI, conf, fs);
cluster.transitionToActive(0);
Token<?> token = fs.getDelegationToken(null);
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
token.renew(conf);
token.cancel(conf);
verify(fs).renewDelegationToken(token);
verify(fs).cancelDelegationToken(token);
} finally {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testClientFailoverWhenStandbyNNHasStaleCredentials()
throws IOException {
Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
conf.setBoolean(DFSConfigKeys
.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
MiniDFSCluster cluster = null;
WebHdfsFileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo).numDataNodes(
0).build();
HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
cluster.waitActive();
fs = (WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf);
cluster.transitionToActive(0);
Token<?> token = fs.getDelegationToken(null);
final DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
identifier.readFields(
new DataInputStream(new ByteArrayInputStream(token.getIdentifier())));
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
final DelegationTokenSecretManager secretManager = NameNodeAdapter.getDtSecretManager(
cluster.getNamesystem(0));
ExceptionHandler eh = new ExceptionHandler();
eh.initResponse(mock(HttpServletResponse.class));
Response resp = null;
try {
secretManager.retrievePassword(identifier);
} catch (IOException e) {
// Mimic the UserProvider class logic (server side) by throwing
// SecurityException here
Assert.assertTrue(e instanceof SecretManager.InvalidToken);
resp = eh.toResponse(new SecurityException(e));
}
// The Response (resp) below is what the server will send to client
//
// BEFORE HDFS-6475 fix, the resp.entity is
// {"RemoteException":{"exception":"SecurityException",
// "javaClassName":"java.lang.SecurityException",
// "message":"Failed to obtain user group information:
// org.apache.hadoop.security.token.SecretManager$InvalidToken:
// StandbyException"}}
// AFTER the fix, the resp.entity is
// {"RemoteException":{"exception":"StandbyException",
// "javaClassName":"org.apache.hadoop.ipc.StandbyException",
// "message":"Operation category READ is not supported in
// state standby"}}
//
// Mimic the client side logic by parsing the response from server
//
Map<?, ?> m = (Map<?, ?>) JSON.parse(resp.getEntity().toString());
RemoteException re = JsonUtilClient.toRemoteException(m);
Exception unwrapped = re.unwrapRemoteException(StandbyException.class);
Assert.assertTrue(unwrapped instanceof StandbyException);
} finally {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testFailoverAfterOpen() throws IOException {
Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
conf.set(FS_DEFAULT_NAME_KEY, HdfsConstants.HDFS_URI_SCHEME +
"://" + LOGICAL_NAME);
MiniDFSCluster cluster = null;
FileSystem fs = null;
final Path p = new Path("/test");
final byte[] data = "Hello".getBytes();
try {
cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
.numDataNodes(1).build();
HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
cluster.waitActive();
fs = FileSystem.get(WEBHDFS_URI, conf);
cluster.transitionToActive(1);
FSDataOutputStream out = fs.create(p);
cluster.shutdownNameNode(1);
cluster.transitionToActive(0);
out.write(data);
out.close();
FSDataInputStream in = fs.open(p);
byte[] buf = new byte[data.length];
IOUtils.readFully(in, buf, 0, buf.length);
Assert.assertArrayEquals(data, buf);
} finally {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testMultipleNamespacesConfigured() throws Exception {
Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
MiniDFSCluster cluster = null;
WebHdfsFileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
.numDataNodes(1).build();
HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
cluster.waitActive();
DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");
fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
Assert.assertEquals(2, fs.getResolvedNNAddr().length);
} finally {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Make sure the WebHdfsFileSystem will retry based on RetriableException when
* rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
*/
@Test (timeout=120000)
public void testRetryWhileNNStartup() throws Exception {
final Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
MiniDFSCluster cluster = null;
final Map<String, Boolean> resultMap = new HashMap<String, Boolean>();
try {
cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
.numDataNodes(0).build();
HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
cluster.waitActive();
cluster.transitionToActive(0);
final NameNode namenode = cluster.getNameNode(0);
final NamenodeProtocols rpcServer = namenode.getRpcServer();
Whitebox.setInternalState(namenode, "rpcServer", null);
new Thread() {
@Override
public void run() {
boolean result = false;
FileSystem fs = null;
try {
fs = FileSystem.get(WEBHDFS_URI, conf);
final Path dir = new Path("/test");
result = fs.mkdirs(dir);
} catch (IOException e) {
result = false;
} finally {
IOUtils.cleanup(null, fs);
}
synchronized (TestWebHDFSForHA.this) {
resultMap.put("mkdirs", result);
TestWebHDFSForHA.this.notifyAll();
}
}
}.start();
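// Give the background thread time to issue its request while rpcServer is
// still null, then restore it so the retried request can succeed.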
Thread.sleep(1000);
Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
synchronized (this) {
while (!resultMap.containsKey("mkdirs")) {
this.wait();
}
Assert.assertTrue(resultMap.get("mkdirs"));
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSXAttr.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import org.apache.hadoop.hdfs.server.namenode.FSXAttrBaseTest;
/**
* Tests XAttr APIs via WebHDFS.
*/
public class TestWebHDFSXAttr extends FSXAttrBaseTest {
/**
* Overridden to provide a WebHdfsFileSystem wrapper for the super-user.
*
* @return WebHdfsFileSystem for super-user
* @throws Exception if creation fails
*/
@Override
protected WebHdfsFileSystem createFileSystem() throws Exception {
return WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
}
}
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.SIMPLE;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.mockito.Matchers.*;
import static org.mockito.Mockito.*;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.net.URLConnection;
import java.security.PrivilegedExceptionAction;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.web.resources.*;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
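/**
 * Tests delegation-token handling in WebHdfsFileSystem: which operations
 * require authentication, lazy token fetching over webhdfs and swebhdfs,
 * and token service/kind overrides.
 */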
public class TestWebHdfsTokens {
private static Configuration conf;
URI uri = null;
@BeforeClass
public static void setUp() {
conf = new Configuration();
SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
UserGroupInformation.setConfiguration(conf);
UserGroupInformation.setLoginUser(
UserGroupInformation.createUserForTesting(
"LoginUser", new String[]{"supergroup"}));
}
private WebHdfsFileSystem spyWebhdfsInSecureSetup() throws IOException {
WebHdfsFileSystem fsOrig = new WebHdfsFileSystem();
fsOrig.initialize(URI.create("webhdfs://127.0.0.1:0"), conf);
WebHdfsFileSystem fs = spy(fsOrig);
return fs;
}
@Test(timeout = 5000)
public void testTokenForNonTokenOp() throws IOException {
WebHdfsFileSystem fs = spyWebhdfsInSecureSetup();
Token<?> token = mock(Token.class);
doReturn(token).when(fs).getDelegationToken(null);
// should get/set/renew token
fs.toUrl(GetOpParam.Op.OPEN, null);
verify(fs).getDelegationToken();
verify(fs).getDelegationToken(null);
verify(fs).setDelegationToken(token);
reset(fs);
// should return prior token
fs.toUrl(GetOpParam.Op.OPEN, null);
verify(fs).getDelegationToken();
verify(fs, never()).getDelegationToken(null);
verify(fs, never()).setDelegationToken(token);
}
@Test(timeout = 5000)
public void testNoTokenForGetToken() throws IOException {
checkNoTokenForOperation(GetOpParam.Op.GETDELEGATIONTOKEN);
}
@Test(timeout = 5000)
public void testNoTokenForRenewToken() throws IOException {
checkNoTokenForOperation(PutOpParam.Op.RENEWDELEGATIONTOKEN);
}
@Test(timeout = 5000)
public void testNoTokenForCancelToken() throws IOException {
checkNoTokenForOperation(PutOpParam.Op.CANCELDELEGATIONTOKEN);
}
private void checkNoTokenForOperation(HttpOpParam.Op op) throws IOException {
WebHdfsFileSystem fs = spyWebhdfsInSecureSetup();
doReturn(null).when(fs).getDelegationToken(null);
fs.initialize(URI.create("webhdfs://127.0.0.1:0"), conf);
// do not get a token!
fs.toUrl(op, null);
verify(fs, never()).getDelegationToken();
verify(fs, never()).getDelegationToken(null);
verify(fs, never()).setDelegationToken((Token<?>)any(Token.class));
}
@Test(timeout = 1000)
public void testGetOpRequireAuth() {
for (HttpOpParam.Op op : GetOpParam.Op.values()) {
boolean expect = (op == GetOpParam.Op.GETDELEGATIONTOKEN);
assertEquals(expect, op.getRequireAuth());
}
}
@Test(timeout = 1000)
public void testPutOpRequireAuth() {
for (HttpOpParam.Op op : PutOpParam.Op.values()) {
boolean expect = (op == PutOpParam.Op.RENEWDELEGATIONTOKEN || op == PutOpParam.Op.CANCELDELEGATIONTOKEN);
assertEquals(expect, op.getRequireAuth());
}
}
@Test(timeout = 1000)
public void testPostOpRequireAuth() {
for (HttpOpParam.Op op : PostOpParam.Op.values()) {
assertFalse(op.getRequireAuth());
}
}
@Test(timeout = 1000)
public void testDeleteOpRequireAuth() {
for (HttpOpParam.Op op : DeleteOpParam.Op.values()) {
assertFalse(op.getRequireAuth());
}
}
@SuppressWarnings("unchecked") // for any(Token.class)
@Test
public void testLazyTokenFetchForWebhdfs() throws Exception {
MiniDFSCluster cluster = null;
WebHdfsFileSystem fs = null;
try {
final Configuration clusterConf = new HdfsConfiguration(conf);
SecurityUtil.setAuthenticationMethod(SIMPLE, clusterConf);
clusterConf.setBoolean(DFSConfigKeys
.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
// trick the NN into thinking security is enabled w/o it trying
// to login from a keytab
UserGroupInformation.setConfiguration(clusterConf);
cluster = new MiniDFSCluster.Builder(clusterConf).numDataNodes(1).build();
cluster.waitActive();
SecurityUtil.setAuthenticationMethod(KERBEROS, clusterConf);
UserGroupInformation.setConfiguration(clusterConf);
uri = DFSUtil.createUri(
"webhdfs", cluster.getNameNode().getHttpAddress());
validateLazyTokenFetch(clusterConf);
} finally {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
}
@SuppressWarnings("unchecked") // for any(Token.class)
@Test
public void testLazyTokenFetchForSWebhdfs() throws Exception {
MiniDFSCluster cluster = null;
SWebHdfsFileSystem fs = null;
try {
final Configuration clusterConf = new HdfsConfiguration(conf);
SecurityUtil.setAuthenticationMethod(SIMPLE, clusterConf);
clusterConf.setBoolean(DFSConfigKeys
.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
String BASEDIR = System.getProperty("test.build.dir",
"target/test-dir") + "/" + TestWebHdfsTokens.class.getSimpleName();
String keystoresDir;
String sslConfDir;
clusterConf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
clusterConf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
clusterConf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
clusterConf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestWebHdfsTokens.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, clusterConf, false);
// trick the NN into thinking security is enabled w/o it trying
// to login from a keytab
UserGroupInformation.setConfiguration(clusterConf);
cluster = new MiniDFSCluster.Builder(clusterConf).numDataNodes(1).build();
cluster.waitActive();
InetSocketAddress addr = cluster.getNameNode().getHttpsAddress();
String nnAddr = NetUtils.getHostPortString(addr);
clusterConf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr);
SecurityUtil.setAuthenticationMethod(KERBEROS, clusterConf);
UserGroupInformation.setConfiguration(clusterConf);
uri = DFSUtil.createUri(
"swebhdfs", cluster.getNameNode().getHttpsAddress());
validateLazyTokenFetch(clusterConf);
} finally {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testSetTokenServiceAndKind() throws Exception {
MiniDFSCluster cluster = null;
try {
final Configuration clusterConf = new HdfsConfiguration(conf);
SecurityUtil.setAuthenticationMethod(SIMPLE, clusterConf);
clusterConf.setBoolean(DFSConfigKeys
.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
// trick the NN into thinking security is enabled w/o it trying
// to login from a keytab
UserGroupInformation.setConfiguration(clusterConf);
cluster = new MiniDFSCluster.Builder(clusterConf).numDataNodes(0).build();
cluster.waitActive();
SecurityUtil.setAuthenticationMethod(KERBEROS, clusterConf);
final WebHdfsFileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem
(clusterConf, "webhdfs");
Whitebox.setInternalState(fs, "canRefreshDelegationToken", true);
URLConnectionFactory factory = new URLConnectionFactory(new ConnectionConfigurator() {
@Override
public HttpURLConnection configure(HttpURLConnection conn)
throws IOException {
return conn;
}
}) {
@Override
public URLConnection openConnection(URL url) throws IOException {
return super.openConnection(new URL(url + "&service=foo&kind=bar"));
}
};
Whitebox.setInternalState(fs, "connectionFactory", factory);
Token<?> token1 = fs.getDelegationToken();
Assert.assertEquals(new Text("bar"), token1.getKind());
final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
Token<DelegationTokenIdentifier> token2 =
fs.new FsPathResponseRunner<Token<DelegationTokenIdentifier>>(
op, null, new RenewerParam(null)) {
@Override
Token<DelegationTokenIdentifier> decodeResponse(Map<?, ?> json)
throws IOException {
return JsonUtilClient.toDelegationToken(json);
}
}.run();
Assert.assertEquals(new Text("bar"), token2.getKind());
Assert.assertEquals(new Text("foo"), token2.getService());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
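/**
 * Exercises lazy token fetching: token operations themselves must not
 * fetch a token, the first regular operation must, later operations must
 * reuse it, and an expired self-acquired token is replaced while an
 * expired UGI-supplied token is not.
 */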
@SuppressWarnings("unchecked")
private void validateLazyTokenFetch(final Configuration clusterConf) throws Exception{
final String testUser = "DummyUser";
UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
testUser, new String[]{"supergroup"});
WebHdfsFileSystem fs = ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
@Override
public WebHdfsFileSystem run() throws IOException {
return spy((WebHdfsFileSystem) FileSystem.newInstance(uri, clusterConf));
}
});
// verify token ops don't get a token
Assert.assertNull(fs.getRenewToken());
Token<?> token = fs.getDelegationToken(null);
fs.renewDelegationToken(token);
fs.cancelDelegationToken(token);
verify(fs, never()).getDelegationToken();
verify(fs, never()).replaceExpiredDelegationToken();
verify(fs, never()).setDelegationToken(any(Token.class));
Assert.assertNull(fs.getRenewToken());
reset(fs);
// verify first non-token op gets a token
final Path p = new Path("/f");
fs.create(p, (short)1).close();
verify(fs, times(1)).getDelegationToken();
verify(fs, never()).replaceExpiredDelegationToken();
verify(fs, times(1)).getDelegationToken(anyString());
verify(fs, times(1)).setDelegationToken(any(Token.class));
token = fs.getRenewToken();
Assert.assertNotNull(token);
Assert.assertEquals(testUser, getTokenOwner(token));
Assert.assertEquals(fs.getTokenKind(), token.getKind());
reset(fs);
// verify prior token is reused
fs.getFileStatus(p);
verify(fs, times(1)).getDelegationToken();
verify(fs, never()).replaceExpiredDelegationToken();
verify(fs, never()).getDelegationToken(anyString());
verify(fs, never()).setDelegationToken(any(Token.class));
Token<?> token2 = fs.getRenewToken();
Assert.assertNotNull(token2);
Assert.assertEquals(fs.getTokenKind(), token.getKind());
Assert.assertSame(token, token2);
reset(fs);
// verify renew of expired token fails w/o getting a new token
token = fs.getRenewToken();
fs.cancelDelegationToken(token);
try {
fs.renewDelegationToken(token);
Assert.fail("should have failed");
} catch (InvalidToken it) {
} catch (Exception ex) {
Assert.fail("wrong exception:"+ex);
}
verify(fs, never()).getDelegationToken();
verify(fs, never()).replaceExpiredDelegationToken();
verify(fs, never()).getDelegationToken(anyString());
verify(fs, never()).setDelegationToken(any(Token.class));
token2 = fs.getRenewToken();
Assert.assertNotNull(token2);
Assert.assertEquals(fs.getTokenKind(), token.getKind());
Assert.assertSame(token, token2);
reset(fs);
// verify cancel of expired token fails w/o getting a new token
try {
fs.cancelDelegationToken(token);
Assert.fail("should have failed");
} catch (InvalidToken it) {
} catch (Exception ex) {
Assert.fail("wrong exception:"+ex);
}
verify(fs, never()).getDelegationToken();
verify(fs, never()).replaceExpiredDelegationToken();
verify(fs, never()).getDelegationToken(anyString());
verify(fs, never()).setDelegationToken(any(Token.class));
token2 = fs.getRenewToken();
Assert.assertNotNull(token2);
Assert.assertEquals(fs.getTokenKind(), token.getKind());
Assert.assertSame(token, token2);
reset(fs);
// verify an expired token is replaced with a new token
fs.open(p).close();
verify(fs, times(2)).getDelegationToken(); // first bad, then good
verify(fs, times(1)).replaceExpiredDelegationToken();
verify(fs, times(1)).getDelegationToken(null);
verify(fs, times(1)).setDelegationToken(any(Token.class));
token2 = fs.getRenewToken();
Assert.assertNotNull(token2);
Assert.assertEquals(fs.getTokenKind(), token.getKind());
Assert.assertNotSame(token, token2);
Assert.assertEquals(testUser, getTokenOwner(token2));
reset(fs);
// verify with open because it's a little different in how it
// opens connections
fs.cancelDelegationToken(fs.getRenewToken());
InputStream is = fs.open(p);
is.read();
is.close();
verify(fs, times(2)).getDelegationToken(); // first bad, then good
verify(fs, times(1)).replaceExpiredDelegationToken();
verify(fs, times(1)).getDelegationToken(null);
verify(fs, times(1)).setDelegationToken(any(Token.class));
token2 = fs.getRenewToken();
Assert.assertNotNull(token2);
Assert.assertEquals(fs.getTokenKind(), token.getKind());
Assert.assertNotSame(token, token2);
Assert.assertEquals(testUser, getTokenOwner(token2));
reset(fs);
// verify fs close cancels the token
fs.close();
verify(fs, never()).getDelegationToken();
verify(fs, never()).replaceExpiredDelegationToken();
verify(fs, never()).getDelegationToken(anyString());
verify(fs, never()).setDelegationToken(any(Token.class));
verify(fs, times(1)).cancelDelegationToken(eq(token2));
// add a token to ugi for a new fs, verify it uses that token
token = fs.getDelegationToken(null);
ugi.addToken(token);
fs = ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
@Override
public WebHdfsFileSystem run() throws IOException {
return spy((WebHdfsFileSystem) FileSystem.newInstance(uri, clusterConf));
}
});
Assert.assertNull(fs.getRenewToken());
fs.getFileStatus(new Path("/"));
verify(fs, times(1)).getDelegationToken();
verify(fs, never()).replaceExpiredDelegationToken();
verify(fs, never()).getDelegationToken(anyString());
verify(fs, times(1)).setDelegationToken(eq(token));
token2 = fs.getRenewToken();
Assert.assertNotNull(token2);
Assert.assertEquals(fs.getTokenKind(), token.getKind());
Assert.assertSame(token, token2);
reset(fs);
// verify it reuses the prior ugi token
fs.getFileStatus(new Path("/"));
verify(fs, times(1)).getDelegationToken();
verify(fs, never()).replaceExpiredDelegationToken();
verify(fs, never()).getDelegationToken(anyString());
verify(fs, never()).setDelegationToken(any(Token.class));
token2 = fs.getRenewToken();
Assert.assertNotNull(token2);
Assert.assertEquals(fs.getTokenKind(), token.getKind());
Assert.assertSame(token, token2);
reset(fs);
// verify an expired ugi token is NOT replaced with a new token
fs.cancelDelegationToken(token);
for (int i=0; i<2; i++) {
try {
fs.getFileStatus(new Path("/"));
Assert.fail("didn't fail");
} catch (InvalidToken it) {
} catch (Exception ex) {
Assert.fail("wrong exception:"+ex);
}
verify(fs, times(1)).getDelegationToken();
verify(fs, times(1)).replaceExpiredDelegationToken();
verify(fs, never()).getDelegationToken(anyString());
verify(fs, never()).setDelegationToken(any(Token.class));
token2 = fs.getRenewToken();
Assert.assertNotNull(token2);
Assert.assertEquals(fs.getTokenKind(), token.getKind());
Assert.assertSame(token, token2);
reset(fs);
}
// verify fs close does NOT cancel the ugi token
fs.close();
verify(fs, never()).getDelegationToken();
verify(fs, never()).replaceExpiredDelegationToken();
verify(fs, never()).getDelegationToken(anyString());
verify(fs, never()).setDelegationToken(any(Token.class));
verify(fs, never()).cancelDelegationToken(any(Token.class));
}
private String getTokenOwner(Token<?> token) throws IOException {
// webhdfs doesn't register properly with the class loader
@SuppressWarnings({ "rawtypes", "unchecked" })
Token<?> clone = new Token(token);
clone.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
return clone.decodeIdentifier().getUser().getUserName();
}
}
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.net.NetUtils;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
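/**
 * Verifies the Content-Length and Transfer-Encoding headers that the
 * WebHDFS client sends for each HTTP method, using a raw ServerSocket to
 * capture the exact request bytes.
 */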
public class TestWebHdfsContentLength {
private static ServerSocket listenSocket;
private static String bindAddr;
private static Path p;
private static FileSystem fs;
private static final Pattern contentLengthPattern = Pattern.compile(
"^(Content-Length|Transfer-Encoding):\\s*(.*)", Pattern.MULTILINE);
private static String errResponse =
"HTTP/1.1 500 Boom\r\n" +
"Content-Length: 0\r\n" +
"Connection: close\r\n\r\n";
private static String redirectResponse;
private static ExecutorService executor;
@BeforeClass
public static void setup() throws IOException {
listenSocket = new ServerSocket();
listenSocket.bind(null);
bindAddr = NetUtils.getHostPortString(
(InetSocketAddress)listenSocket.getLocalSocketAddress());
redirectResponse =
"HTTP/1.1 307 Redirect\r\n" +
"Location: http://"+bindAddr+"/path\r\n" +
"Connection: close\r\n\r\n";
p = new Path("webhdfs://"+bindAddr+"/path");
fs = p.getFileSystem(new Configuration());
executor = Executors.newSingleThreadExecutor();
}
@AfterClass
public static void teardown() throws IOException {
if (listenSocket != null) {
listenSocket.close();
}
if (executor != null) {
executor.shutdownNow();
}
}
@Test
public void testGetOp() throws Exception {
Future<String> future = contentLengthFuture(errResponse);
try {
fs.getFileStatus(p);
Assert.fail();
} catch (IOException ioe) {} // expected
Assert.assertEquals(null, getContentLength(future));
}
@Test
public void testGetOpWithRedirect() {
Future<String> future1 = contentLengthFuture(redirectResponse);
Future<String> future2 = contentLengthFuture(errResponse);
try {
fs.open(p).read();
Assert.fail();
} catch (IOException ioe) {} // expected
Assert.assertEquals(null, getContentLength(future1));
Assert.assertEquals(null, getContentLength(future2));
}
@Test
public void testPutOp() {
Future<String> future = contentLengthFuture(errResponse);
try {
fs.mkdirs(p);
Assert.fail();
} catch (IOException ioe) {} // expected
Assert.assertEquals("0", getContentLength(future));
}
@Test
public void testPutOpWithRedirect() {
Future<String> future1 = contentLengthFuture(redirectResponse);
Future<String> future2 = contentLengthFuture(errResponse);
try {
FSDataOutputStream os = fs.create(p);
os.write(new byte[]{0});
os.close();
Assert.fail();
} catch (IOException ioe) {} // expected
Assert.assertEquals("0", getContentLength(future1));
Assert.assertEquals("chunked", getContentLength(future2));
}
@Test
public void testPostOp() {
Future<String> future = contentLengthFuture(errResponse);
try {
fs.concat(p, new Path[]{p});
Assert.fail();
} catch (IOException ioe) {} // expected
Assert.assertEquals("0", getContentLength(future));
}
@Test
public void testPostOpWithRedirect() {
// POST operation with redirect
Future<String> future1 = contentLengthFuture(redirectResponse);
Future<String> future2 = contentLengthFuture(errResponse);
try {
FSDataOutputStream os = fs.append(p);
os.write(new byte[]{0});
os.close();
Assert.fail();
} catch (IOException ioe) {} // expected
Assert.assertEquals("0", getContentLength(future1));
Assert.assertEquals("chunked", getContentLength(future2));
}
@Test
public void testDelete() {
Future<String> future = contentLengthFuture(errResponse);
try {
fs.delete(p, false);
Assert.fail();
} catch (IOException ioe) {} // expected
Assert.assertEquals(null, getContentLength(future));
}
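// Extracts the Content-Length (or Transfer-Encoding) header value from the
// captured request, or null if neither header is present.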
private String getContentLength(Future<String> future) {
String request = null;
try {
request = future.get(2, TimeUnit.SECONDS);
} catch (Exception e) {
Assert.fail(e.toString());
}
Matcher matcher = contentLengthPattern.matcher(request);
return matcher.find() ? matcher.group(2) : null;
}
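// Accepts a single connection, replies with the canned response, and
// returns the raw request that the client sent.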
private Future<String> contentLengthFuture(final String response) {
return executor.submit(new Callable<String>() {
@Override
public String call() throws Exception {
Socket client = listenSocket.accept();
client.setSoTimeout(2000);
try {
client.getOutputStream().write(response.getBytes());
client.shutdownOutput();
byte[] buf = new byte[4*1024]; // much bigger than request
int n = client.getInputStream().read(buf);
return new String(buf, 0, n);
} finally {
client.close();
}
}
});
}
}
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.List;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.junit.Assert;
import org.junit.Test;
import com.google.common.collect.Lists;
public final class TestURLConnectionFactory {
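  /**
   * The supplied ConnectionConfigurator must be invoked exactly once per
   * openConnection() call, with the connection already bound to the URL.
   */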
@Test
public void testConnConfiguratior() throws IOException {
final URL u = new URL("http://localhost");
final List<HttpURLConnection> conns = Lists.newArrayList();
URLConnectionFactory fc = new URLConnectionFactory(new ConnectionConfigurator() {
@Override
public HttpURLConnection configure(HttpURLConnection conn)
throws IOException {
Assert.assertEquals(u, conn.getURL());
conns.add(conn);
return conn;
}
});
fc.openConnection(u);
Assert.assertEquals(1, conns.size());
}
}
| 1,729 | 32.921569 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Arrays;
import java.util.Map;
import java.util.Random;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.web.resources.*;
import org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Assert;
import org.junit.Test;
public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
private static final Configuration conf = new Configuration();
private static final MiniDFSCluster cluster;
private String defaultWorkingDirectory;
private UserGroupInformation ugi;
static {
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.waitActive();
//change root permission to 777
cluster.getFileSystem().setPermission(
new Path("/"), new FsPermission((short)0777));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
protected void setUp() throws Exception {
//get file system as a non-superuser
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
ugi = UserGroupInformation.createUserForTesting(
current.getShortUserName() + "x", new String[]{"user"});
fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsConstants.WEBHDFS_SCHEME);
defaultWorkingDirectory = fs.getWorkingDirectory().toUri().getPath();
}
@Override
protected String getDefaultWorkingDirectory() {
return defaultWorkingDirectory;
}
  /** HDFS throws AccessControlException
   * when calling exists(..) on a path /foo/bar/file
   * if /foo/bar is actually a file in HDFS.
   */
@Override
public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
Path testDir = path("/test/hadoop");
assertFalse(fs.exists(testDir));
assertTrue(fs.mkdirs(testDir));
assertTrue(fs.exists(testDir));
createFile(path("/test/hadoop/file"));
Path testSubDir = path("/test/hadoop/file/subdir");
try {
fs.mkdirs(testSubDir);
fail("Should throw IOException.");
} catch (IOException e) {
// expected
}
try {
assertFalse(fs.exists(testSubDir));
} catch(AccessControlException e) {
// also okay for HDFS.
}
Path testDeepSubDir = path("/test/hadoop/file/deep/sub/dir");
try {
fs.mkdirs(testDeepSubDir);
fail("Should throw IOException.");
} catch (IOException e) {
// expected
}
try {
assertFalse(fs.exists(testDeepSubDir));
} catch(AccessControlException e) {
// also okay for HDFS.
}
}
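  /**
   * A minimal sketch (not exercised by the contract tests) of the lenient
   * probe the test above implies: treat AccessControlException from
   * exists(..) the same as "the path does not exist". The helper name is
   * illustrative, not part of the FileSystem API.
   */
  private boolean existsLenient(Path p) throws IOException {
    try {
      return fs.exists(p);
    } catch (AccessControlException e) {
      // HDFS may reject exists(..) when an ancestor of p is a file.
      return false;
    }
  }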
  //the following are new tests (i.e. not overriding the superclass methods)
public void testGetFileBlockLocations() throws IOException {
final String f = "/test/testGetFileBlockLocations";
createFile(path(f));
final BlockLocation[] computed = fs.getFileBlockLocations(new Path(f), 0L, 1L);
final BlockLocation[] expected = cluster.getFileSystem().getFileBlockLocations(
new Path(f), 0L, 1L);
assertEquals(expected.length, computed.length);
for (int i = 0; i < computed.length; i++) {
assertEquals(expected[i].toString(), computed[i].toString());
// Check names
String names1[] = expected[i].getNames();
String names2[] = computed[i].getNames();
Arrays.sort(names1);
Arrays.sort(names2);
Assert.assertArrayEquals("Names differ", names1, names2);
// Check topology
String topos1[] = expected[i].getTopologyPaths();
String topos2[] = computed[i].getTopologyPaths();
Arrays.sort(topos1);
Arrays.sort(topos2);
Assert.assertArrayEquals("Topology differs", topos1, topos2);
}
}
public void testCaseInsensitive() throws IOException {
final Path p = new Path("/test/testCaseInsensitive");
final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
final PutOpParam.Op op = PutOpParam.Op.MKDIRS;
    //replace the op query parameter with mixed-case letters
final URL url = webhdfs.toUrl(op, p);
WebHdfsFileSystem.LOG.info("url = " + url);
final URL replaced = new URL(url.toString().replace(op.toQueryString(),
"Op=mkDIrs"));
WebHdfsFileSystem.LOG.info("replaced = " + replaced);
//connect with the replaced URL.
final HttpURLConnection conn = (HttpURLConnection)replaced.openConnection();
conn.setRequestMethod(op.getType().toString());
conn.connect();
final BufferedReader in = new BufferedReader(new InputStreamReader(
conn.getInputStream()));
for(String line; (line = in.readLine()) != null; ) {
WebHdfsFileSystem.LOG.info("> " + line);
}
    //check if the command succeeded.
assertTrue(fs.getFileStatus(p).isDirectory());
}
public void testOpenNonExistFile() throws IOException {
final Path p = new Path("/test/testOpenNonExistFile");
//open it as a file, should get FileNotFoundException
try {
fs.open(p);
fail("Expected FileNotFoundException was not thrown");
} catch(FileNotFoundException fnfe) {
WebHdfsFileSystem.LOG.info("This is expected.", fnfe);
}
}
public void testSeek() throws IOException {
final Path dir = new Path("/test/testSeek");
assertTrue(fs.mkdirs(dir));
{ //test zero file size
final Path zero = new Path(dir, "zero");
fs.create(zero).close();
int count = 0;
final FSDataInputStream in = fs.open(zero);
for(; in.read() != -1; count++);
in.close();
assertEquals(0, count);
}
final byte[] mydata = new byte[1 << 20];
new Random().nextBytes(mydata);
final Path p = new Path(dir, "file");
FSDataOutputStream out = fs.create(p, false, 4096, (short)3, 1L << 17);
out.write(mydata, 0, mydata.length);
out.close();
final int one_third = mydata.length/3;
final int two_third = one_third*2;
{ //test seek
final int offset = one_third;
final int len = mydata.length - offset;
final byte[] buf = new byte[len];
final FSDataInputStream in = fs.open(p);
in.seek(offset);
//read all remaining data
in.readFully(buf);
in.close();
for (int i = 0; i < buf.length; i++) {
assertEquals("Position " + i + ", offset=" + offset + ", length=" + len,
mydata[i + offset], buf[i]);
}
}
{ //test position read (read the data after the two_third location)
final int offset = two_third;
final int len = mydata.length - offset;
final byte[] buf = new byte[len];
final FSDataInputStream in = fs.open(p);
in.readFully(offset, buf);
in.close();
for (int i = 0; i < buf.length; i++) {
assertEquals("Position " + i + ", offset=" + offset + ", length=" + len,
mydata[i + offset], buf[i]);
}
}
}
public void testRootDir() throws IOException {
final Path root = new Path("/");
final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
final URL url = webhdfs.toUrl(GetOpParam.Op.NULL, root);
WebHdfsFileSystem.LOG.info("null url=" + url);
Assert.assertTrue(url.toString().contains("v1"));
//test root permission
final FileStatus status = fs.getFileStatus(root);
assertTrue(status != null);
assertEquals(0777, status.getPermission().toShort());
//delete root
assertFalse(fs.delete(root, true));
//create file using root path
try {
final FSDataOutputStream out = fs.create(root);
out.write(1);
out.close();
fail();
} catch(IOException e) {
WebHdfsFileSystem.LOG.info("This is expected.", e);
}
//open file using root path
try {
final FSDataInputStream in = fs.open(root);
in.read();
fail();
} catch(IOException e) {
WebHdfsFileSystem.LOG.info("This is expected.", e);
}
}
/**
* Test get with length parameter greater than actual file length.
*/
public void testLengthParamLongerThanFile() throws IOException {
WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
Path dir = new Path("/test");
assertTrue(webhdfs.mkdirs(dir));
// Create a file with some content.
Path testFile = new Path("/test/testLengthParamLongerThanFile");
String content = "testLengthParamLongerThanFile";
FSDataOutputStream testFileOut = webhdfs.create(testFile);
try {
testFileOut.write(content.getBytes("US-ASCII"));
} finally {
IOUtils.closeStream(testFileOut);
}
// Open the file, but request length longer than actual file length by 1.
HttpOpParam.Op op = GetOpParam.Op.OPEN;
URL url = webhdfs.toUrl(op, testFile, new LengthParam((long) (content
.length() + 1)));
HttpURLConnection conn = null;
InputStream is = null;
try {
conn = (HttpURLConnection)url.openConnection();
conn.setRequestMethod(op.getType().toString());
conn.setDoOutput(op.getDoOutput());
conn.setInstanceFollowRedirects(true);
// Expect OK response and Content-Length header equal to actual length.
assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
assertEquals(String.valueOf(content.length()), conn.getHeaderField(
"Content-Length"));
// Check content matches.
byte[] respBody = new byte[content.length()];
is = conn.getInputStream();
IOUtils.readFully(is, respBody, 0, content.length());
assertEquals(content, new String(respBody, "US-ASCII"));
} finally {
IOUtils.closeStream(is);
if (conn != null) {
conn.disconnect();
}
}
}
/**
* Test get with offset and length parameters that combine to request a length
* greater than actual file length.
*/
public void testOffsetPlusLengthParamsLongerThanFile() throws IOException {
WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
Path dir = new Path("/test");
assertTrue(webhdfs.mkdirs(dir));
// Create a file with some content.
Path testFile = new Path("/test/testOffsetPlusLengthParamsLongerThanFile");
String content = "testOffsetPlusLengthParamsLongerThanFile";
FSDataOutputStream testFileOut = webhdfs.create(testFile);
try {
testFileOut.write(content.getBytes("US-ASCII"));
} finally {
IOUtils.closeStream(testFileOut);
}
// Open the file, but request offset starting at 1 and length equal to file
// length. Considering the offset, this is longer than the actual content.
HttpOpParam.Op op = GetOpParam.Op.OPEN;
URL url = webhdfs.toUrl(op, testFile, new LengthParam(Long.valueOf(
content.length())), new OffsetParam(1L));
HttpURLConnection conn = null;
InputStream is = null;
try {
conn = (HttpURLConnection)url.openConnection();
conn.setRequestMethod(op.getType().toString());
conn.setDoOutput(op.getDoOutput());
conn.setInstanceFollowRedirects(true);
// Expect OK response and Content-Length header equal to actual length.
assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
assertEquals(String.valueOf(content.length() - 1), conn.getHeaderField(
"Content-Length"));
// Check content matches.
byte[] respBody = new byte[content.length() - 1];
is = conn.getInputStream();
IOUtils.readFully(is, respBody, 0, content.length() - 1);
assertEquals(content.substring(1), new String(respBody, "US-ASCII"));
} finally {
IOUtils.closeStream(is);
if (conn != null) {
conn.disconnect();
}
}
}
public void testResponseCode() throws IOException {
final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
final Path root = new Path("/");
final Path dir = new Path("/test/testUrl");
assertTrue(webhdfs.mkdirs(dir));
final Path file = new Path("/test/file");
final FSDataOutputStream out = webhdfs.create(file);
out.write(1);
out.close();
{//test GETHOMEDIRECTORY
final URL url = webhdfs.toUrl(GetOpParam.Op.GETHOMEDIRECTORY, root);
final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
final Map<?, ?> m = WebHdfsTestUtil.connectAndGetJson(
conn, HttpServletResponse.SC_OK);
assertEquals(webhdfs.getHomeDirectory().toUri().getPath(),
m.get(Path.class.getSimpleName()));
conn.disconnect();
}
{//test GETHOMEDIRECTORY with unauthorized doAs
final URL url = webhdfs.toUrl(GetOpParam.Op.GETHOMEDIRECTORY, root,
new DoAsParam(ugi.getShortUserName() + "proxy"));
final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.connect();
assertEquals(HttpServletResponse.SC_FORBIDDEN, conn.getResponseCode());
conn.disconnect();
}
{//test set owner with empty parameters
final URL url = webhdfs.toUrl(PutOpParam.Op.SETOWNER, dir);
final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.connect();
assertEquals(HttpServletResponse.SC_BAD_REQUEST, conn.getResponseCode());
conn.disconnect();
}
{//test set replication on a directory
final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION;
final URL url = webhdfs.toUrl(op, dir);
final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod(op.getType().toString());
conn.connect();
assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
assertFalse(webhdfs.setReplication(dir, (short)1));
conn.disconnect();
}
{//test get file status for a non-exist file.
final Path p = new Path(dir, "non-exist");
final URL url = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, p);
final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.connect();
assertEquals(HttpServletResponse.SC_NOT_FOUND, conn.getResponseCode());
conn.disconnect();
}
{//test set permission with empty parameters
final HttpOpParam.Op op = PutOpParam.Op.SETPERMISSION;
final URL url = webhdfs.toUrl(op, dir);
final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod(op.getType().toString());
conn.connect();
assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
assertEquals(0, conn.getContentLength());
assertEquals(MediaType.APPLICATION_OCTET_STREAM, conn.getContentType());
assertEquals((short)0755, webhdfs.getFileStatus(dir).getPermission().toShort());
conn.disconnect();
}
{//test append.
AppendTestUtil.testAppend(fs, new Path(dir, "append"));
}
{//test NamenodeAddressParam not set.
final HttpOpParam.Op op = PutOpParam.Op.CREATE;
final URL url = webhdfs.toUrl(op, dir);
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod(op.getType().toString());
conn.setDoOutput(false);
conn.setInstanceFollowRedirects(false);
conn.connect();
final String redirect = conn.getHeaderField("Location");
conn.disconnect();
//remove NamenodeAddressParam
WebHdfsFileSystem.LOG.info("redirect = " + redirect);
final int i = redirect.indexOf(NamenodeAddressParam.NAME);
final int j = redirect.indexOf("&", i);
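      //i - 1 also drops the separator character preceding the parameter name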
String modified = redirect.substring(0, i - 1) + redirect.substring(j);
WebHdfsFileSystem.LOG.info("modified = " + modified);
//connect to datanode
conn = (HttpURLConnection)new URL(modified).openConnection();
conn.setRequestMethod(op.getType().toString());
conn.setDoOutput(op.getDoOutput());
conn.connect();
assertEquals(HttpServletResponse.SC_BAD_REQUEST, conn.getResponseCode());
}
{//test jsonParse with non-json type.
final HttpOpParam.Op op = GetOpParam.Op.OPEN;
final URL url = webhdfs.toUrl(op, file);
final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod(op.getType().toString());
conn.connect();
try {
WebHdfsFileSystem.jsonParse(conn, false);
fail();
} catch(IOException ioe) {
WebHdfsFileSystem.LOG.info("GOOD", ioe);
}
conn.disconnect();
}
{//test create with path containing spaces
HttpOpParam.Op op = PutOpParam.Op.CREATE;
Path path = new Path("/test/path with spaces");
URL url = webhdfs.toUrl(op, path);
HttpURLConnection conn = (HttpURLConnection)url.openConnection();
conn.setRequestMethod(op.getType().toString());
conn.setDoOutput(false);
conn.setInstanceFollowRedirects(false);
final String redirect;
try {
conn.connect();
assertEquals(HttpServletResponse.SC_TEMPORARY_REDIRECT,
conn.getResponseCode());
redirect = conn.getHeaderField("Location");
} finally {
conn.disconnect();
}
conn = (HttpURLConnection)new URL(redirect).openConnection();
conn.setRequestMethod(op.getType().toString());
conn.setDoOutput(op.getDoOutput());
try {
conn.connect();
assertEquals(HttpServletResponse.SC_CREATED, conn.getResponseCode());
} finally {
conn.disconnect();
}
}
}
@Test
public void testAccess() throws IOException, InterruptedException {
Path p1 = new Path("/pathX");
try {
UserGroupInformation ugi = UserGroupInformation.createUserForTesting("alpha",
new String[]{"beta"});
WebHdfsFileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf,
WebHdfsConstants.WEBHDFS_SCHEME);
fs.mkdirs(p1);
fs.setPermission(p1, new FsPermission((short) 0444));
fs.access(p1, FsAction.READ);
try {
fs.access(p1, FsAction.WRITE);
fail("The access call should have failed.");
} catch (AccessControlException e) {
// expected
}
Path badPath = new Path("/bad");
try {
fs.access(badPath, FsAction.READ);
fail("The access call should have failed");
} catch (FileNotFoundException e) {
// expected
}
} finally {
fs.delete(p1, true);
}
}
}
| 19,985 | 34.248677 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DelegationTokenRenewer;
import org.apache.hadoop.fs.DelegationTokenRenewer.RenewAction;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Progressable;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
public class TestTokenAspect {
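  /**
   * A stub FileSystem implementing only the hooks TokenAspect interacts
   * with: token selection, renewal, cancellation, and the renewable-token
   * contract used by DelegationTokenRenewer.
   */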
private static class DummyFs extends FileSystem implements
DelegationTokenRenewer.Renewable, TokenAspect.TokenManagementDelegator {
private static final Text TOKEN_KIND = new Text("DummyFS Token");
private boolean emulateSecurityEnabled;
private TokenAspect<DummyFs> tokenAspect;
private final UserGroupInformation ugi = UserGroupInformation
.createUserForTesting("foo", new String[] { "bar" });
private URI uri;
@Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
return null;
}
@Override
public void cancelDelegationToken(Token<?> token) throws IOException {
}
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return null;
}
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
return false;
}
@Override
public URI getCanonicalUri() {
return super.getCanonicalUri();
}
@Override
public FileStatus getFileStatus(Path f) throws IOException {
return null;
}
@Override
public Token<?> getRenewToken() {
return null;
}
@Override
public URI getUri() {
return uri;
}
@Override
public Path getWorkingDirectory() {
return null;
}
@Override
public void initialize(URI name, Configuration conf) throws IOException {
super.initialize(name, conf);
setConf(conf);
this.uri = URI.create(name.getScheme() + "://" + name.getAuthority());
tokenAspect = new TokenAspect<DummyFs>(this,
SecurityUtil.buildTokenService(uri), TOKEN_KIND);
if (emulateSecurityEnabled || UserGroupInformation.isSecurityEnabled()) {
tokenAspect.initDelegationToken(ugi);
}
}
@Override
public FileStatus[] listStatus(Path f) throws FileNotFoundException,
IOException {
return null;
}
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
return false;
}
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
return null;
}
@Override
public boolean rename(Path src, Path dst) throws IOException {
return false;
}
@Override
public long renewDelegationToken(Token<?> token) throws IOException {
return 0;
}
@Override
public <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
}
@Override
public void setWorkingDirectory(Path new_dir) {
}
}
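  /** Uses Whitebox reflection to reach the aspect's private renew action. */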
private static RenewAction<?> getActionFromTokenAspect(
TokenAspect<DummyFs> tokenAspect) {
return (RenewAction<?>) Whitebox.getInternalState(tokenAspect, "action");
}
@Test
public void testCachedInitialization() throws IOException, URISyntaxException {
Configuration conf = new Configuration();
DummyFs fs = spy(new DummyFs());
Token<TokenIdentifier> token = new Token<TokenIdentifier>(new byte[0],
new byte[0], DummyFs.TOKEN_KIND, new Text("127.0.0.1:1234"));
doReturn(token).when(fs).getDelegationToken(anyString());
doReturn(token).when(fs).getRenewToken();
fs.emulateSecurityEnabled = true;
fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
fs.tokenAspect.ensureTokenInitialized();
verify(fs, times(1)).getDelegationToken(null);
verify(fs, times(1)).setDelegationToken(token);
// For the second iteration, the token should be cached.
fs.tokenAspect.ensureTokenInitialized();
verify(fs, times(1)).getDelegationToken(null);
verify(fs, times(1)).setDelegationToken(token);
}
@Test
public void testGetRemoteToken() throws IOException, URISyntaxException {
Configuration conf = new Configuration();
DummyFs fs = spy(new DummyFs());
Token<TokenIdentifier> token = new Token<TokenIdentifier>(new byte[0],
new byte[0], DummyFs.TOKEN_KIND, new Text("127.0.0.1:1234"));
doReturn(token).when(fs).getDelegationToken(anyString());
doReturn(token).when(fs).getRenewToken();
fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
fs.tokenAspect.ensureTokenInitialized();
// Select a token, store and renew it
verify(fs).setDelegationToken(token);
assertNotNull(Whitebox.getInternalState(fs.tokenAspect, "dtRenewer"));
assertNotNull(Whitebox.getInternalState(fs.tokenAspect, "action"));
}
@Test
public void testGetRemoteTokenFailure() throws IOException,
URISyntaxException {
Configuration conf = new Configuration();
DummyFs fs = spy(new DummyFs());
IOException e = new IOException();
doThrow(e).when(fs).getDelegationToken(anyString());
fs.emulateSecurityEnabled = true;
fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
try {
fs.tokenAspect.ensureTokenInitialized();
} catch (IOException exc) {
assertEquals(e, exc);
}
}
@Test
public void testInitWithNoTokens() throws IOException, URISyntaxException {
Configuration conf = new Configuration();
DummyFs fs = spy(new DummyFs());
doReturn(null).when(fs).getDelegationToken(anyString());
fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
fs.tokenAspect.ensureTokenInitialized();
// No token will be selected.
verify(fs, never()).setDelegationToken(
Mockito.<Token<? extends TokenIdentifier>> any());
}
@Test
public void testInitWithUGIToken() throws IOException, URISyntaxException {
Configuration conf = new Configuration();
DummyFs fs = spy(new DummyFs());
doReturn(null).when(fs).getDelegationToken(anyString());
Token<TokenIdentifier> token = new Token<TokenIdentifier>(new byte[0],
new byte[0], DummyFs.TOKEN_KIND, new Text("127.0.0.1:1234"));
fs.ugi.addToken(token);
fs.ugi.addToken(new Token<TokenIdentifier>(new byte[0], new byte[0],
new Text("Other token"), new Text("127.0.0.1:8021")));
assertEquals("wrong tokens in user", 2, fs.ugi.getTokens().size());
fs.emulateSecurityEnabled = true;
fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
fs.tokenAspect.ensureTokenInitialized();
// Select a token from ugi (not from the remote host), store it but don't
// renew it
verify(fs).setDelegationToken(token);
verify(fs, never()).getDelegationToken(anyString());
assertNull(Whitebox.getInternalState(fs.tokenAspect, "dtRenewer"));
assertNull(Whitebox.getInternalState(fs.tokenAspect, "action"));
}
@Test
public void testRenewal() throws Exception {
Configuration conf = new Configuration();
Token<?> token1 = mock(Token.class);
Token<?> token2 = mock(Token.class);
final long renewCycle = 100;
DelegationTokenRenewer.renewCycle = renewCycle;
UserGroupInformation ugi = UserGroupInformation.createUserForTesting("foo",
new String[] { "bar" });
DummyFs fs = spy(new DummyFs());
doReturn(token1).doReturn(token2).when(fs).getDelegationToken(null);
doReturn(token1).when(fs).getRenewToken();
// cause token renewer to abandon the token
doThrow(new IOException("renew failed")).when(token1).renew(conf);
doThrow(new IOException("get failed")).when(fs).addDelegationTokens(null,
null);
final URI uri = new URI("dummyfs://127.0.0.1:1234");
TokenAspect<DummyFs> tokenAspect = new TokenAspect<DummyFs>(fs,
SecurityUtil.buildTokenService(uri), DummyFs.TOKEN_KIND);
fs.initialize(uri, conf);
tokenAspect.initDelegationToken(ugi);
// trigger token acquisition
tokenAspect.ensureTokenInitialized();
DelegationTokenRenewer.RenewAction<?> action = getActionFromTokenAspect(tokenAspect);
verify(fs).setDelegationToken(token1);
assertTrue(action.isValid());
// upon renewal, token will go bad based on above stubbing
Thread.sleep(renewCycle * 2);
assertSame(action, getActionFromTokenAspect(tokenAspect));
assertFalse(action.isValid());
// now that token is invalid, should get a new one
tokenAspect.ensureTokenInitialized();
verify(fs, times(2)).getDelegationToken(anyString());
verify(fs).setDelegationToken(token2);
assertNotSame(action, getActionFromTokenAspect(tokenAspect));
action = getActionFromTokenAspect(tokenAspect);
assertTrue(action.isValid());
}
}
| 10,930 | 33.48265 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import java.io.IOException;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assert;
import org.junit.Test;
public class TestParam {
public static final Log LOG = LogFactory.getLog(TestParam.class);
final Configuration conf = new Configuration();
@Test
public void testAccessTimeParam() {
final AccessTimeParam p = new AccessTimeParam(AccessTimeParam.DEFAULT);
Assert.assertEquals(-1L, p.getValue().longValue());
new AccessTimeParam(-1L);
try {
new AccessTimeParam(-2L);
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testBlockSizeParam() {
final BlockSizeParam p = new BlockSizeParam(BlockSizeParam.DEFAULT);
Assert.assertEquals(null, p.getValue());
Assert.assertEquals(
conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT),
p.getValue(conf));
new BlockSizeParam(1L);
try {
new BlockSizeParam(0L);
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testBufferSizeParam() {
final BufferSizeParam p = new BufferSizeParam(BufferSizeParam.DEFAULT);
Assert.assertEquals(null, p.getValue());
Assert.assertEquals(
conf.getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT),
p.getValue(conf));
new BufferSizeParam(1);
try {
new BufferSizeParam(0);
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testDelegationParam() {
final DelegationParam p = new DelegationParam(DelegationParam.DEFAULT);
Assert.assertEquals(null, p.getValue());
}
@Test
public void testDestinationParam() {
final DestinationParam p = new DestinationParam(DestinationParam.DEFAULT);
Assert.assertEquals(null, p.getValue());
new DestinationParam("/abc");
try {
new DestinationParam("abc");
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testGroupParam() {
final GroupParam p = new GroupParam(GroupParam.DEFAULT);
Assert.assertEquals(null, p.getValue());
}
@Test
public void testModificationTimeParam() {
final ModificationTimeParam p = new ModificationTimeParam(ModificationTimeParam.DEFAULT);
Assert.assertEquals(-1L, p.getValue().longValue());
new ModificationTimeParam(-1L);
try {
new ModificationTimeParam(-2L);
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testOverwriteParam() {
final OverwriteParam p = new OverwriteParam(OverwriteParam.DEFAULT);
Assert.assertEquals(false, p.getValue());
new OverwriteParam("trUe");
try {
new OverwriteParam("abc");
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testOwnerParam() {
final OwnerParam p = new OwnerParam(OwnerParam.DEFAULT);
Assert.assertEquals(null, p.getValue());
}
@Test
public void testPermissionParam() {
final PermissionParam p = new PermissionParam(PermissionParam.DEFAULT);
Assert.assertEquals(new FsPermission((short)0755), p.getFsPermission());
new PermissionParam("0");
try {
new PermissionParam("-1");
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
new PermissionParam("1777");
try {
new PermissionParam("2000");
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new PermissionParam("8");
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new PermissionParam("abc");
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testRecursiveParam() {
final RecursiveParam p = new RecursiveParam(RecursiveParam.DEFAULT);
Assert.assertEquals(false, p.getValue());
new RecursiveParam("falSe");
try {
new RecursiveParam("abc");
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testRenewerParam() {
final RenewerParam p = new RenewerParam(RenewerParam.DEFAULT);
Assert.assertEquals(null, p.getValue());
}
@Test
public void testReplicationParam() {
final ReplicationParam p = new ReplicationParam(ReplicationParam.DEFAULT);
Assert.assertEquals(null, p.getValue());
Assert.assertEquals(
(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
DFSConfigKeys.DFS_REPLICATION_DEFAULT),
p.getValue(conf));
new ReplicationParam((short)1);
try {
new ReplicationParam((short)0);
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testToSortedStringEscapesURICharacters() {
final String sep = "&";
    Param<?, ?> ampParam = new TokenArgumentParam("token&ampersand");
Param<?, ?> equalParam = new RenewerParam("renewer=equal");
final String expected = "&renewer=renewer%3Dequal&token=token%26ampersand";
final String actual = Param.toSortedString(sep, equalParam, ampParam);
Assert.assertEquals(expected, actual);
}
@Test
public void userNameEmpty() {
UserParam userParam = new UserParam("");
assertNull(userParam.getValue());
}
@Test(expected = IllegalArgumentException.class)
public void userNameInvalidStart() {
new UserParam("1x");
}
@Test(expected = IllegalArgumentException.class)
public void userNameInvalidDollarSign() {
new UserParam("1$x");
}
@Test
public void userNameMinLength() {
UserParam userParam = new UserParam("a");
assertNotNull(userParam.getValue());
}
@Test
public void userNameValidDollarSign() {
UserParam userParam = new UserParam("a$");
assertNotNull(userParam.getValue());
}
@Test
public void testConcatSourcesParam() {
final String[] strings = {"/", "/foo", "/bar"};
for(int n = 0; n < strings.length; n++) {
final String[] sub = new String[n];
final Path[] paths = new Path[n];
for(int i = 0; i < paths.length; i++) {
paths[i] = new Path(sub[i] = strings[i]);
}
final String expected = StringUtils.join(",", Arrays.asList(sub));
final ConcatSourcesParam computed = new ConcatSourcesParam(paths);
Assert.assertEquals(expected, computed.getValue());
}
}
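  /**
   * The user-name pattern is static state shared across tests, so the
   * original domain is saved and restored after widening the pattern to
   * accept names that start with a digit.
   */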
@Test
public void testUserNameOkAfterResettingPattern() {
UserParam.Domain oldDomain = UserParam.getUserPatternDomain();
String newPattern = "^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$";
UserParam.setUserPattern(newPattern);
UserParam userParam = new UserParam("1x");
assertNotNull(userParam.getValue());
userParam = new UserParam("123");
assertNotNull(userParam.getValue());
UserParam.setUserPatternDomain(oldDomain);
}
@Test
public void testAclPermissionParam() {
final AclPermissionParam p =
new AclPermissionParam("user::rwx,group::r--,other::rwx,user:user1:rwx");
List<AclEntry> setAclList =
AclEntry.parseAclSpec("user::rwx,group::r--,other::rwx,user:user1:rwx",
true);
Assert.assertEquals(setAclList.toString(), p.getAclPermission(true)
.toString());
new AclPermissionParam("user::rw-,group::rwx,other::rw-,user:user1:rwx");
try {
new AclPermissionParam("user::rw--,group::rwx-,other::rw-");
Assert.fail();
} catch (IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
new AclPermissionParam(
"user::rw-,group::rwx,other::rw-,user:user1:rwx,group:group1:rwx,other::rwx,mask::rwx,default:user:user1:rwx");
try {
new AclPermissionParam("user:r-,group:rwx,other:rw-");
Assert.fail();
} catch (IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new AclPermissionParam("default:::r-,default:group::rwx,other::rw-");
Assert.fail();
} catch (IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new AclPermissionParam("user:r-,group::rwx,other:rw-,mask:rw-,temp::rwx");
Assert.fail();
} catch (IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
@Test
public void testXAttrNameParam() {
final XAttrNameParam p = new XAttrNameParam("user.a1");
Assert.assertEquals(p.getXAttrName(), "user.a1");
}
@Test
public void testXAttrValueParam() throws IOException {
final XAttrValueParam p = new XAttrValueParam("0x313233");
Assert.assertArrayEquals(p.getXAttrValue(),
XAttrCodec.decodeValue("0x313233"));
}
@Test
public void testXAttrEncodingParam() {
final XAttrEncodingParam p = new XAttrEncodingParam(XAttrCodec.BASE64);
Assert.assertEquals(p.getEncoding(), XAttrCodec.BASE64);
final XAttrEncodingParam p1 = new XAttrEncodingParam(p.getValueString());
Assert.assertEquals(p1.getEncoding(), XAttrCodec.BASE64);
}
@Test
public void testXAttrSetFlagParam() {
EnumSet<XAttrSetFlag> flag = EnumSet.of(
XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE);
final XAttrSetFlagParam p = new XAttrSetFlagParam(flag);
Assert.assertEquals(p.getFlag(), flag);
final XAttrSetFlagParam p1 = new XAttrSetFlagParam(p.getValueString());
Assert.assertEquals(p1.getFlag(), flag);
}
@Test
public void testRenameOptionSetParam() {
final RenameOptionSetParam p = new RenameOptionSetParam(
Options.Rename.OVERWRITE, Options.Rename.NONE);
final RenameOptionSetParam p1 = new RenameOptionSetParam(
p.getValueString());
Assert.assertEquals(p1.getValue(), EnumSet.of(
Options.Rename.OVERWRITE, Options.Rename.NONE));
}
@Test
public void testSnapshotNameParam() {
final OldSnapshotNameParam s1 = new OldSnapshotNameParam("s1");
final SnapshotNameParam s2 = new SnapshotNameParam("s2");
Assert.assertEquals("s1", s1.getValue());
Assert.assertEquals("s2", s2.getValue());
}
@Test
public void testFsActionParam() {
new FsActionParam("rwx");
new FsActionParam("rw-");
new FsActionParam("r-x");
new FsActionParam("-wx");
new FsActionParam("r--");
new FsActionParam("-w-");
new FsActionParam("--x");
new FsActionParam("---");
try {
new FsActionParam("rw");
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new FsActionParam("qwx");
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new FsActionParam("qrwx");
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new FsActionParam("rwxx");
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new FsActionParam("xwr");
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
new FsActionParam("r-w");
Assert.fail();
} catch(IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
}
| 13,039 | 27.533917 | 119 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitShm.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.shortcircuit;
import java.io.File;
import java.io.FileInputStream;
import java.util.ArrayList;
import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
import org.apache.hadoop.io.nativeio.SharedFileDescriptorFactory;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
public class TestShortCircuitShm {
public static final Log LOG = LogFactory.getLog(TestShortCircuitShm.class);
private static final File TEST_BASE =
new File(System.getProperty("test.build.data", "/tmp"));
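  /** Skip these tests on platforms without shared file descriptor support. */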
@Before
public void before() {
Assume.assumeTrue(null ==
SharedFileDescriptorFactory.getLoadingFailureReason());
}
@Test(timeout=60000)
public void testStartupShutdown() throws Exception {
File path = new File(TEST_BASE, "testStartupShutdown");
path.mkdirs();
SharedFileDescriptorFactory factory =
SharedFileDescriptorFactory.create("shm_",
new String[] { path.getAbsolutePath() } );
FileInputStream stream =
factory.createDescriptor("testStartupShutdown", 4096);
ShortCircuitShm shm = new ShortCircuitShm(ShmId.createRandom(), stream);
shm.free();
stream.close();
FileUtil.fullyDelete(path);
}
@Test(timeout=60000)
public void testAllocateSlots() throws Exception {
File path = new File(TEST_BASE, "testAllocateSlots");
path.mkdirs();
SharedFileDescriptorFactory factory =
SharedFileDescriptorFactory.create("shm_",
new String[] { path.getAbsolutePath() });
FileInputStream stream =
factory.createDescriptor("testAllocateSlots", 4096);
ShortCircuitShm shm = new ShortCircuitShm(ShmId.createRandom(), stream);
int numSlots = 0;
ArrayList<Slot> slots = new ArrayList<Slot>();
while (!shm.isFull()) {
Slot slot = shm.allocAndRegisterSlot(new ExtendedBlockId(123L, "test_bp1"));
slots.add(slot);
numSlots++;
}
LOG.info("allocated " + numSlots + " slots before running out.");
int slotIdx = 0;
for (Iterator<Slot> iter = shm.slotIterator();
iter.hasNext(); ) {
Assert.assertTrue(slots.contains(iter.next()));
}
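    // A freshly registered slot is not anchorable: addAnchor() must fail
    // until makeAnchorable() is called, and every anchor must be removed
    // before the slot can be unregistered and invalidated below.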
for (Slot slot : slots) {
Assert.assertFalse(slot.addAnchor());
Assert.assertEquals(slotIdx++, slot.getSlotIdx());
}
for (Slot slot : slots) {
slot.makeAnchorable();
}
for (Slot slot : slots) {
Assert.assertTrue(slot.addAnchor());
}
for (Slot slot : slots) {
slot.removeAnchor();
}
for (Slot slot : slots) {
shm.unregisterSlot(slot.getSlotIdx());
slot.makeInvalid();
}
shm.free();
stream.close();
FileUtil.fullyDelete(path);
}
}
| 3,844 | 33.954545 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.shortcircuit;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.net.URI;
import java.nio.ByteBuffer;
import java.security.PrivilegedExceptionAction;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.ClientContext;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestBlockReaderLocal;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Test for short circuit read functionality using {@link BlockReaderLocal}.
 * When a block being read by a client resides on the local datanode, a short
 * circuit read lets the client read the block file directly from the local
 * file system instead of connecting to the datanode via
 * {@link DataTransferProtocol}.
*/
public class TestShortCircuitLocalRead {
private static TemporarySocketDirectory sockDir;
@BeforeClass
public static void init() {
sockDir = new TemporarySocketDirectory();
DomainSocket.disableBindPathValidation();
}
@AfterClass
public static void shutdown() throws IOException {
sockDir.close();
}
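  /** These tests need native UNIX domain socket support; skip otherwise. */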
@Before
public void before() {
Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
}
static final long seed = 0xDEADBEEFL;
static final int blockSize = 5120;
final boolean simulatedStorage = false;
// creates a file but does not close it
static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
throws IOException {
FSDataOutputStream stm = fileSys.create(name, true,
fileSys.getConf().getInt("io.file.buffer.size", 4096),
(short)repl, blockSize);
return stm;
}
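  /**
   * A minimal sketch (using only config keys these tests already set) of the
   * client-side settings that enable short-circuit reads. The tests build
   * richer configurations inline, so this helper is illustrative and unused.
   */
  static Configuration newShortCircuitConf(String domainSocketPath) {
    Configuration conf = new Configuration();
    conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
    // Client and datanode rendezvous over this UNIX domain socket path.
    conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, domainSocketPath);
    return conf;
  }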
static private void checkData(byte[] actual, int from, byte[] expected,
String message) {
checkData(actual, from, expected, actual.length, message);
}
static private void checkData(byte[] actual, int from, byte[] expected, int len,
String message) {
for (int idx = 0; idx < len; idx++) {
if (expected[from + idx] != actual[idx]) {
Assert.fail(message + " byte " + (from + idx) + " differs. expected "
+ expected[from + idx] + " actual " + actual[idx] +
"\nexpected: " + StringUtils.byteToHexString(expected, from, from + len) +
"\nactual: " + StringUtils.byteToHexString(actual, 0, len));
}
}
}
private static String getCurrentUser() throws IOException {
return UserGroupInformation.getCurrentUser().getShortUserName();
}
/** Check file content, reading as user {@code readingUser} */
static void checkFileContent(URI uri, Path name, byte[] expected,
int readOffset, String readingUser, Configuration conf,
boolean legacyShortCircuitFails)
throws IOException, InterruptedException {
// Ensure short circuit is enabled
DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
ClientContext getClientContext = ClientContext.getFromConf(conf);
if (legacyShortCircuitFails) {
assertFalse(getClientContext.getDisableLegacyBlockReaderLocal());
}
FSDataInputStream stm = fs.open(name);
byte[] actual = new byte[expected.length-readOffset];
stm.readFully(readOffset, actual);
checkData(actual, readOffset, expected, "Read 2");
stm.close();
// Now read using a different API.
actual = new byte[expected.length-readOffset];
stm = fs.open(name);
IOUtils.skipFully(stm, readOffset);
//Read a small number of bytes first.
int nread = stm.read(actual, 0, 3);
nread += stm.read(actual, nread, 2);
//Read across chunk boundary
nread += stm.read(actual, nread, 517);
checkData(actual, readOffset, expected, nread, "A few bytes");
//Now read rest of it
while (nread < actual.length) {
int nbytes = stm.read(actual, nread, actual.length - nread);
if (nbytes < 0) {
throw new EOFException("End of file reached before reading fully.");
}
nread += nbytes;
}
checkData(actual, readOffset, expected, "Read 3");
if (legacyShortCircuitFails) {
assertTrue(getClientContext.getDisableLegacyBlockReaderLocal());
}
stm.close();
}
private static byte [] arrayFromByteBuffer(ByteBuffer buf) {
ByteBuffer alt = buf.duplicate();
alt.clear();
byte[] arr = new byte[alt.remaining()];
alt.get(arr);
return arr;
}
/** Check the file content, reading as user {@code readingUser} */
static void checkFileContentDirect(URI uri, Path name, byte[] expected,
int readOffset, String readingUser, Configuration conf,
boolean legacyShortCircuitFails)
throws IOException, InterruptedException {
// Ensure short circuit is enabled
DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
ClientContext clientContext = ClientContext.getFromConf(conf);
if (legacyShortCircuitFails) {
assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
}
HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);
ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
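    // ByteBuffer reads stop at limit(), so the limit is ratcheted forward to
    // force several small reads (one across a chunk boundary) before the rest
    // of the file is drained.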
IOUtils.skipFully(stm, readOffset);
actual.limit(3);
//Read a small number of bytes first.
int nread = stm.read(actual);
actual.limit(nread + 2);
nread += stm.read(actual);
// Read across chunk boundary
actual.limit(Math.min(actual.capacity(), nread + 517));
nread += stm.read(actual);
checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
"A few bytes");
//Now read rest of it
actual.limit(actual.capacity());
while (actual.hasRemaining()) {
int nbytes = stm.read(actual);
if (nbytes < 0) {
throw new EOFException("End of file reached before reading fully.");
}
nread += nbytes;
}
checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
if (legacyShortCircuitFails) {
assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
}
stm.close();
}
public void doTestShortCircuitReadLegacy(boolean ignoreChecksum, int size,
int readOffset, String shortCircuitUser, String readingUser,
boolean legacyShortCircuitFails) throws IOException, InterruptedException {
doTestShortCircuitReadImpl(ignoreChecksum, size, readOffset,
shortCircuitUser, readingUser, legacyShortCircuitFails);
}
public void doTestShortCircuitRead(boolean ignoreChecksum, int size,
int readOffset) throws IOException, InterruptedException {
doTestShortCircuitReadImpl(ignoreChecksum, size, readOffset,
null, getCurrentUser(), false);
}
/**
* Test that file data can be read by reading the block file
* directly from the local store.
*/
public void doTestShortCircuitReadImpl(boolean ignoreChecksum, int size,
int readOffset, String shortCircuitUser, String readingUser,
boolean legacyShortCircuitFails) throws IOException, InterruptedException {
Configuration conf = new Configuration();
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
ignoreChecksum);
// Set a random client context name so that we don't share a cache with
// other invocations of this function.
conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,
UUID.randomUUID().toString());
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
new File(sockDir.getDir(),
"TestShortCircuitLocalRead._PORT.sock").getAbsolutePath());
if (shortCircuitUser != null) {
conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
shortCircuitUser);
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
}
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(true).build();
FileSystem fs = cluster.getFileSystem();
try {
// check that / exists
Path path = new Path("/");
assertTrue("/ should be a directory", fs.getFileStatus(path)
.isDirectory() == true);
byte[] fileData = AppendTestUtil.randomBytes(seed, size);
Path file1 = fs.makeQualified(new Path("filelocal.dat"));
FSDataOutputStream stm = createFile(fs, file1, 1);
stm.write(fileData);
stm.close();
URI uri = cluster.getURI();
checkFileContent(uri, file1, fileData, readOffset, readingUser, conf,
legacyShortCircuitFails);
checkFileContentDirect(uri, file1, fileData, readOffset, readingUser,
conf, legacyShortCircuitFails);
} finally {
fs.close();
cluster.shutdown();
}
}
@Test(timeout=60000)
public void testFileLocalReadNoChecksum() throws Exception {
doTestShortCircuitRead(true, 3*blockSize+100, 0);
}
@Test(timeout=60000)
public void testFileLocalReadChecksum() throws Exception {
doTestShortCircuitRead(false, 3*blockSize+100, 0);
}
@Test(timeout=60000)
public void testSmallFileLocalRead() throws Exception {
doTestShortCircuitRead(false, 13, 0);
doTestShortCircuitRead(false, 13, 5);
doTestShortCircuitRead(true, 13, 0);
doTestShortCircuitRead(true, 13, 5);
}
@Test(timeout=60000)
public void testLocalReadLegacy() throws Exception {
doTestShortCircuitReadLegacy(true, 13, 0, getCurrentUser(),
getCurrentUser(), false);
}
/**
   * Try a short circuit read from a reader that is not allowed
   * to use short circuit. The test ensures the reader falls back to
   * non-short-circuit reads when short circuit is disallowed.
*/
@Test(timeout=60000)
public void testLocalReadFallback() throws Exception {
doTestShortCircuitReadLegacy(true, 13, 0, getCurrentUser(), "notallowed", true);
}
@Test(timeout=60000)
public void testReadFromAnOffset() throws Exception {
doTestShortCircuitRead(false, 3*blockSize+100, 777);
doTestShortCircuitRead(true, 3*blockSize+100, 777);
}
@Test(timeout=60000)
public void testLongFile() throws Exception {
doTestShortCircuitRead(false, 10*blockSize+100, 777);
doTestShortCircuitRead(true, 10*blockSize+100, 777);
}
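  /**
   * Opens the filesystem as the given user inside a doAs block so that
   * per-user short-circuit permissions take effect.
   */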
private static DistributedFileSystem getFileSystem(String user, final URI uri,
final Configuration conf) throws InterruptedException, IOException {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
return ugi.doAs(new PrivilegedExceptionAction<DistributedFileSystem>() {
@Override
public DistributedFileSystem run() throws Exception {
return (DistributedFileSystem)FileSystem.get(uri, conf);
}
});
}
@Test(timeout=10000)
public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException {
final Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(true).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
try {
DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
LocatedBlocks lb = cluster.getNameNode().getRpcServer()
.getBlockLocations("/tmp/x", 0, 16);
// Create a new block object, because the block inside LocatedBlock at
// namenode is of type BlockInfo.
ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
ClientDatanodeProtocol proxy =
DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
try {
proxy.getBlockLocalPathInfo(blk, token);
Assert.fail("The call should have failed as this user "
+ " is not allowed to call getBlockLocalPathInfo");
} catch (IOException ex) {
Assert.assertTrue(ex.getMessage().contains(
"not allowed to call getBlockLocalPathInfo"));
}
} finally {
fs.close();
cluster.shutdown();
}
}
@Test(timeout=10000)
public void testSkipWithVerifyChecksum() throws IOException {
int size = blockSize;
Configuration conf = new Configuration();
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, false);
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
"/tmp/testSkipWithVerifyChecksum._PORT");
DomainSocket.disableBindPathValidation();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(true).build();
FileSystem fs = cluster.getFileSystem();
try {
// check that / exists
Path path = new Path("/");
assertTrue("/ should be a directory", fs.getFileStatus(path)
.isDirectory() == true);
byte[] fileData = AppendTestUtil.randomBytes(seed, size*3);
      // create a new file in the home directory
Path file1 = new Path("filelocal.dat");
FSDataOutputStream stm = createFile(fs, file1, 1);
// write to file
stm.write(fileData);
stm.close();
// now test the skip function
FSDataInputStream instm = fs.open(file1);
byte[] actual = new byte[fileData.length];
// read something from the block first, otherwise BlockReaderLocal.skip()
// will not be invoked
int nread = instm.read(actual, 0, 3);
long skipped = 2*size+3;
instm.seek(skipped);
nread = instm.read(actual, (int)(skipped + nread), 3);
instm.close();
} finally {
fs.close();
cluster.shutdown();
}
}
@Test(timeout=120000)
public void testHandleTruncatedBlockFile() throws IOException {
MiniDFSCluster cluster = null;
HdfsConfiguration conf = new HdfsConfiguration();
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, false);
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
"/tmp/testHandleTruncatedBlockFile._PORT");
conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "CRC32C");
final Path TEST_PATH = new Path("/a");
final Path TEST_PATH2 = new Path("/b");
final long RANDOM_SEED = 4567L;
final long RANDOM_SEED2 = 4568L;
FSDataInputStream fsIn = null;
final int TEST_LENGTH = 3456;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH,
TEST_LENGTH, (short)1, RANDOM_SEED);
DFSTestUtil.createFile(fs, TEST_PATH2,
TEST_LENGTH, (short)1, RANDOM_SEED2);
fsIn = cluster.getFileSystem().open(TEST_PATH2);
byte original[] = new byte[TEST_LENGTH];
IOUtils.readFully(fsIn, original, 0, TEST_LENGTH);
fsIn.close();
fsIn = null;
try {
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
} catch (InterruptedException e) {
Assert.fail("unexpected InterruptedException during " +
"waitReplication: " + e);
} catch (TimeoutException e) {
Assert.fail("unexpected TimeoutException during " +
"waitReplication: " + e);
}
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
File dataFile = cluster.getBlockFile(0, block);
cluster.shutdown();
cluster = null;
RandomAccessFile raf = null;
try {
raf = new RandomAccessFile(dataFile, "rw");
raf.setLength(0);
} finally {
if (raf != null) raf.close();
}
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(false).build();
cluster.waitActive();
fs = cluster.getFileSystem();
fsIn = fs.open(TEST_PATH);
try {
byte buf[] = new byte[100];
fsIn.seek(2000);
fsIn.readFully(buf, 0, buf.length);
Assert.fail("shouldn't be able to read from corrupt 0-length " +
"block file.");
} catch (IOException e) {
DFSClient.LOG.error("caught exception ", e);
}
fsIn.close();
fsIn = null;
// We should still be able to read the other file.
// This is important because it indicates that we detected that the
// previous block was corrupt, rather than blaming the problem on
// communication.
fsIn = fs.open(TEST_PATH2);
byte buf[] = new byte[original.length];
fsIn.readFully(buf, 0, buf.length);
TestBlockReaderLocal.assertArrayRegionsEqual(original, 0, buf, 0,
original.length);
fsIn.close();
fsIn = null;
} finally {
if (fsIn != null) fsIn.close();
if (cluster != null) cluster.shutdown();
}
}
  /**
   * Runs benchmarks comparing short circuit reads against regular reads with
   * a specified number of threads reading simultaneously.
   * <br>
   * Run this using the following command:
   * bin/hadoop --config confdir \
   * org.apache.hadoop.hdfs.TestShortCircuitLocalRead \
   * <shortcircuit on?> <skip checksum?> <number of threads>
   */
public static void main(String[] args) throws Exception {
if (args.length != 3) {
System.out.println("Usage: test shortcircuit checksum threadCount");
System.exit(1);
}
boolean shortcircuit = Boolean.valueOf(args[0]);
    boolean skipChecksum = Boolean.valueOf(args[1]);
int threadCount = Integer.parseInt(args[2]);
    // Set up: create a file
final Configuration conf = new Configuration();
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, shortcircuit);
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
"/tmp/TestShortCircuitLocalRead._PORT");
    conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
        skipChecksum);
    // Use much larger fileSize and dataToWrite values for the benchmark
int fileSize = 1000 * blockSize + 100; // File with 1000 blocks
final byte [] dataToWrite = AppendTestUtil.randomBytes(seed, fileSize);
    // create a new file in the home directory
final Path file1 = new Path("filelocal.dat");
final FileSystem fs = FileSystem.get(conf);
FSDataOutputStream stm = createFile(fs, file1, 1);
stm.write(dataToWrite);
stm.close();
long start = Time.now();
final int iteration = 20;
Thread[] threads = new Thread[threadCount];
for (int i = 0; i < threadCount; i++) {
threads[i] = new Thread() {
@Override
public void run() {
for (int i = 0; i < iteration; i++) {
try {
String user = getCurrentUser();
checkFileContent(fs.getUri(), file1, dataToWrite, 0, user, conf, true);
} catch (IOException e) {
e.printStackTrace();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
};
}
for (int i = 0; i < threadCount; i++) {
threads[i].start();
}
for (int i = 0; i < threadCount; i++) {
threads[i].join();
}
long end = Time.now();
System.out.println("Iteration " + iteration + " took " + (end - start));
fs.delete(file1, false);
}
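  // Example invocation of the benchmark above (arguments are hypothetical):
  // short-circuit enabled, checksums verified (skipChecksum=false), and
  // 4 reader threads:
  //   bin/hadoop --config confdir \
  //       org.apache.hadoop.hdfs.TestShortCircuitLocalRead true false 4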
@Test(timeout=60000)
public void testReadWithRemoteBlockReader() throws IOException, InterruptedException {
doTestShortCircuitReadWithRemoteBlockReader(true, 3*blockSize+100, getCurrentUser(), 0, false);
}
/**
* Test that file data can be read by reading the block
* through RemoteBlockReader
* @throws IOException
*/
  public void doTestShortCircuitReadWithRemoteBlockReader(
      boolean ignoreChecksum, int size, String shortCircuitUser,
      int readOffset, boolean shortCircuitFails)
      throws IOException, InterruptedException {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(true).build();
FileSystem fs = cluster.getFileSystem();
// check that / exists
Path path = new Path("/");
URI uri = cluster.getURI();
assertTrue("/ should be a directory", fs.getFileStatus(path)
.isDirectory() == true);
byte[] fileData = AppendTestUtil.randomBytes(seed, size);
Path file1 = new Path("filelocal.dat");
FSDataOutputStream stm = createFile(fs, file1, 1);
stm.write(fileData);
stm.close();
try {
checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser,
conf, shortCircuitFails);
      // RemoteBlockReader does not support read(ByteBuffer buf)
      assertTrue("expected RemoteBlockReader to reject read(ByteBuffer buf)",
          checkUnsupportedMethod(fs, file1, fileData, readOffset));
} catch(IOException e) {
throw new IOException("doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
} catch(InterruptedException inEx) {
throw inEx;
} finally {
fs.close();
cluster.shutdown();
}
}
  /**
   * Attempt a direct ByteBuffer read and return true iff the reader rejects
   * it with UnsupportedOperationException.
   */
  private boolean checkUnsupportedMethod(FileSystem fs, Path file,
      byte[] expected, int readOffset) throws IOException {
HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(file);
ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
IOUtils.skipFully(stm, readOffset);
try {
stm.read(actual);
} catch(UnsupportedOperationException unex) {
return true;
}
return false;
}
}
| 24,440 | 36.951863 | 143 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.shortcircuit;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
import static org.hamcrest.CoreMatchers.equalTo;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.apache.commons.lang.mutable.MutableBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockReaderFactory;
import org.apache.hadoop.hdfs.BlockReaderTestUtil;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.net.DomainPeer;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry;
import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.RegisteredShm;
import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo;
import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache.CacheVisitor;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache.ShortCircuitReplicaCreator;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;
import com.google.common.collect.HashMultimap;
public class TestShortCircuitCache {
static final Log LOG = LogFactory.getLog(TestShortCircuitCache.class);
private static class TestFileDescriptorPair {
final TemporarySocketDirectory dir = new TemporarySocketDirectory();
final FileInputStream[] fis;
public TestFileDescriptorPair() throws IOException {
fis = new FileInputStream[2];
for (int i = 0; i < 2; i++) {
String name = dir.getDir() + "/file" + i;
FileOutputStream fos = new FileOutputStream(name);
if (i == 0) {
// write 'data' file
fos.write(1);
} else {
// write 'metadata' file
BlockMetadataHeader header =
new BlockMetadataHeader((short)1,
DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4));
DataOutputStream dos = new DataOutputStream(fos);
BlockMetadataHeader.writeHeader(dos, header);
dos.close();
}
fos.close();
fis[i] = new FileInputStream(name);
}
}
public FileInputStream[] getFileInputStreams() {
return fis;
}
public void close() throws IOException {
IOUtils.cleanup(LOG, fis);
dir.close();
}
public boolean compareWith(FileInputStream data, FileInputStream meta) {
return ((data == fis[0]) && (meta == fis[1]));
}
}
private static class SimpleReplicaCreator
implements ShortCircuitReplicaCreator {
private final int blockId;
private final ShortCircuitCache cache;
private final TestFileDescriptorPair pair;
SimpleReplicaCreator(int blockId, ShortCircuitCache cache,
TestFileDescriptorPair pair) {
this.blockId = blockId;
this.cache = cache;
this.pair = pair;
}
@Override
public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
try {
ExtendedBlockId key = new ExtendedBlockId(blockId, "test_bp1");
return new ShortCircuitReplicaInfo(
new ShortCircuitReplica(key,
pair.getFileInputStreams()[0], pair.getFileInputStreams()[1],
cache, Time.monotonicNow(), null));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
@Test(timeout=60000)
public void testCreateAndDestroy() throws Exception {
ShortCircuitCache cache =
new ShortCircuitCache(10, 1, 10, 1, 1, 10000, 0);
cache.close();
}
@Test(timeout=60000)
public void testAddAndRetrieve() throws Exception {
final ShortCircuitCache cache =
new ShortCircuitCache(10, 10000000, 10, 10000000, 1, 10000, 0);
final TestFileDescriptorPair pair = new TestFileDescriptorPair();
ShortCircuitReplicaInfo replicaInfo1 =
cache.fetchOrCreate(new ExtendedBlockId(123, "test_bp1"),
new SimpleReplicaCreator(123, cache, pair));
Preconditions.checkNotNull(replicaInfo1.getReplica());
Preconditions.checkState(replicaInfo1.getInvalidTokenException() == null);
pair.compareWith(replicaInfo1.getReplica().getDataStream(),
replicaInfo1.getReplica().getMetaStream());
ShortCircuitReplicaInfo replicaInfo2 =
cache.fetchOrCreate(new ExtendedBlockId(123, "test_bp1"),
new ShortCircuitReplicaCreator() {
@Override
public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
Assert.fail("expected to use existing entry.");
return null;
}
});
Preconditions.checkNotNull(replicaInfo2.getReplica());
Preconditions.checkState(replicaInfo2.getInvalidTokenException() == null);
Preconditions.checkState(replicaInfo1 == replicaInfo2);
pair.compareWith(replicaInfo2.getReplica().getDataStream(),
replicaInfo2.getReplica().getMetaStream());
replicaInfo1.getReplica().unref();
replicaInfo2.getReplica().unref();
// Even after the reference count falls to 0, we still keep the replica
// around for a while (we have configured the expiry period to be really,
// really long here)
ShortCircuitReplicaInfo replicaInfo3 =
cache.fetchOrCreate(
new ExtendedBlockId(123, "test_bp1"), new ShortCircuitReplicaCreator() {
@Override
public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
Assert.fail("expected to use existing entry.");
return null;
}
});
Preconditions.checkNotNull(replicaInfo3.getReplica());
Preconditions.checkState(replicaInfo3.getInvalidTokenException() == null);
replicaInfo3.getReplica().unref();
pair.close();
cache.close();
}
@Test(timeout=100000)
public void testExpiry() throws Exception {
final ShortCircuitCache cache =
new ShortCircuitCache(2, 1, 1, 10000000, 1, 10000000, 0);
final TestFileDescriptorPair pair = new TestFileDescriptorPair();
ShortCircuitReplicaInfo replicaInfo1 =
cache.fetchOrCreate(
new ExtendedBlockId(123, "test_bp1"),
new SimpleReplicaCreator(123, cache, pair));
Preconditions.checkNotNull(replicaInfo1.getReplica());
Preconditions.checkState(replicaInfo1.getInvalidTokenException() == null);
pair.compareWith(replicaInfo1.getReplica().getDataStream(),
replicaInfo1.getReplica().getMetaStream());
replicaInfo1.getReplica().unref();
final MutableBoolean triedToCreate = new MutableBoolean(false);
do {
Thread.sleep(10);
ShortCircuitReplicaInfo replicaInfo2 =
cache.fetchOrCreate(
new ExtendedBlockId(123, "test_bp1"), new ShortCircuitReplicaCreator() {
@Override
public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
triedToCreate.setValue(true);
return null;
}
});
if ((replicaInfo2 != null) && (replicaInfo2.getReplica() != null)) {
replicaInfo2.getReplica().unref();
}
} while (triedToCreate.isFalse());
cache.close();
}
@Test(timeout=60000)
public void testEviction() throws Exception {
final ShortCircuitCache cache =
new ShortCircuitCache(2, 10000000, 1, 10000000, 1, 10000, 0);
final TestFileDescriptorPair pairs[] = new TestFileDescriptorPair[] {
new TestFileDescriptorPair(),
new TestFileDescriptorPair(),
new TestFileDescriptorPair(),
};
ShortCircuitReplicaInfo replicaInfos[] = new ShortCircuitReplicaInfo[] {
null,
null,
null
};
for (int i = 0; i < pairs.length; i++) {
replicaInfos[i] = cache.fetchOrCreate(
new ExtendedBlockId(i, "test_bp1"),
new SimpleReplicaCreator(i, cache, pairs[i]));
Preconditions.checkNotNull(replicaInfos[i].getReplica());
Preconditions.checkState(replicaInfos[i].getInvalidTokenException() == null);
pairs[i].compareWith(replicaInfos[i].getReplica().getDataStream(),
replicaInfos[i].getReplica().getMetaStream());
}
// At this point, we have 3 replicas in use.
// Let's close them all.
for (int i = 0; i < pairs.length; i++) {
replicaInfos[i].getReplica().unref();
}
// The last two replicas should still be cached.
for (int i = 1; i < pairs.length; i++) {
      final Integer iVal = Integer.valueOf(i);
replicaInfos[i] = cache.fetchOrCreate(
new ExtendedBlockId(i, "test_bp1"),
new ShortCircuitReplicaCreator() {
@Override
public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
Assert.fail("expected to use existing entry for " + iVal);
return null;
}
});
Preconditions.checkNotNull(replicaInfos[i].getReplica());
Preconditions.checkState(replicaInfos[i].getInvalidTokenException() == null);
pairs[i].compareWith(replicaInfos[i].getReplica().getDataStream(),
replicaInfos[i].getReplica().getMetaStream());
}
// The first (oldest) replica should not be cached.
final MutableBoolean calledCreate = new MutableBoolean(false);
replicaInfos[0] = cache.fetchOrCreate(
new ExtendedBlockId(0, "test_bp1"),
new ShortCircuitReplicaCreator() {
@Override
public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
calledCreate.setValue(true);
return null;
}
});
Preconditions.checkState(replicaInfos[0].getReplica() == null);
Assert.assertTrue(calledCreate.isTrue());
// Clean up
for (int i = 1; i < pairs.length; i++) {
replicaInfos[i].getReplica().unref();
}
for (int i = 0; i < pairs.length; i++) {
pairs[i].close();
}
cache.close();
}
@Test(timeout=60000)
public void testTimeBasedStaleness() throws Exception {
// Set up the cache with a short staleness time.
final ShortCircuitCache cache =
new ShortCircuitCache(2, 10000000, 1, 10000000, 1, 10, 0);
final TestFileDescriptorPair pairs[] = new TestFileDescriptorPair[] {
new TestFileDescriptorPair(),
new TestFileDescriptorPair(),
};
ShortCircuitReplicaInfo replicaInfos[] = new ShortCircuitReplicaInfo[] {
null,
null
};
final long HOUR_IN_MS = 60 * 60 * 1000;
for (int i = 0; i < pairs.length; i++) {
      final Integer iVal = Integer.valueOf(i);
final ExtendedBlockId key = new ExtendedBlockId(i, "test_bp1");
replicaInfos[i] = cache.fetchOrCreate(key,
new ShortCircuitReplicaCreator() {
@Override
public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
try {
return new ShortCircuitReplicaInfo(
new ShortCircuitReplica(key,
pairs[iVal].getFileInputStreams()[0],
pairs[iVal].getFileInputStreams()[1],
cache, Time.monotonicNow() + (iVal * HOUR_IN_MS), null));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
});
Preconditions.checkNotNull(replicaInfos[i].getReplica());
Preconditions.checkState(replicaInfos[i].getInvalidTokenException() == null);
pairs[i].compareWith(replicaInfos[i].getReplica().getDataStream(),
replicaInfos[i].getReplica().getMetaStream());
}
    // Keep trying to fetchOrCreate block 0 until it goes stale (and we must re-create it).
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
ShortCircuitReplicaInfo info = cache.fetchOrCreate(
new ExtendedBlockId(0, "test_bp1"), new ShortCircuitReplicaCreator() {
@Override
public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
return null;
}
});
if (info.getReplica() != null) {
info.getReplica().unref();
return false;
}
return true;
}
}, 500, 60000);
    // Make sure that the second replica did not go stale.
ShortCircuitReplicaInfo info = cache.fetchOrCreate(
new ExtendedBlockId(1, "test_bp1"), new ShortCircuitReplicaCreator() {
@Override
public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
Assert.fail("second replica went stale, despite 1 " +
"hour staleness time.");
return null;
}
});
info.getReplica().unref();
// Clean up
for (int i = 1; i < pairs.length; i++) {
replicaInfos[i].getReplica().unref();
}
cache.close();
}
private static Configuration createShortCircuitConf(String testName,
TemporarySocketDirectory sockDir) {
Configuration conf = new Configuration();
conf.set(DFS_CLIENT_CONTEXT, testName);
conf.setLong(DFS_BLOCK_SIZE_KEY, 4096);
conf.set(DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(),
testName).getAbsolutePath());
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
false);
conf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
DFSInputStream.tcpReadsDisabledForTesting = true;
DomainSocket.disableBindPathValidation();
Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
return conf;
}
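  // Usage sketch (assumption): the tests below all follow the same pattern
  // around this helper -- build a conf, start a single-datanode cluster, and
  // fetch the client's ShortCircuitCache:
  //   TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  //   Configuration conf = createShortCircuitConf("sketch", sockDir);
  //   MiniDFSCluster cluster =
  //       new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  //   cluster.waitActive();
  //   ShortCircuitCache cache = cluster.getFileSystem().getClient()
  //       .getClientContext().getShortCircuitCache();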
private static DomainPeer getDomainPeerToDn(Configuration conf)
throws IOException {
DomainSocket sock =
DomainSocket.connect(conf.get(DFS_DOMAIN_SOCKET_PATH_KEY));
return new DomainPeer(sock);
}
@Test(timeout=60000)
public void testAllocShm() throws Exception {
BlockReaderTestUtil.enableShortCircuitShmTracing();
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
Configuration conf = createShortCircuitConf("testAllocShm", sockDir);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
final ShortCircuitCache cache =
fs.getClient().getClientContext().getShortCircuitCache();
cache.getDfsClientShmManager().visit(new Visitor() {
@Override
public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
throws IOException {
// The ClientShmManager starts off empty
Assert.assertEquals(0, info.size());
}
});
DomainPeer peer = getDomainPeerToDn(conf);
MutableBoolean usedPeer = new MutableBoolean(false);
ExtendedBlockId blockId = new ExtendedBlockId(123, "xyz");
final DatanodeInfo datanode =
new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
// Allocating the first shm slot requires using up a peer.
Slot slot = cache.allocShmSlot(datanode, peer, usedPeer,
blockId, "testAllocShm_client");
Assert.assertNotNull(slot);
Assert.assertTrue(usedPeer.booleanValue());
cache.getDfsClientShmManager().visit(new Visitor() {
@Override
public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
throws IOException {
        // One shared memory segment should now exist for the datanode.
Assert.assertEquals(1, info.size());
PerDatanodeVisitorInfo vinfo = info.get(datanode);
Assert.assertFalse(vinfo.disabled);
Assert.assertEquals(0, vinfo.full.size());
Assert.assertEquals(1, vinfo.notFull.size());
}
});
cache.scheduleSlotReleaser(slot);
// Wait for the slot to be released, and the shared memory area to be
// closed. Since we didn't register this shared memory segment on the
// server, it will also be a test of how well the server deals with
// bogus client behavior.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
final MutableBoolean done = new MutableBoolean(false);
try {
cache.getDfsClientShmManager().visit(new Visitor() {
@Override
public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
throws IOException {
done.setValue(info.get(datanode).full.isEmpty() &&
info.get(datanode).notFull.isEmpty());
}
});
} catch (IOException e) {
LOG.error("error running visitor", e);
}
return done.booleanValue();
}
}, 10, 60000);
cluster.shutdown();
sockDir.close();
}
@Test(timeout=60000)
public void testShmBasedStaleness() throws Exception {
BlockReaderTestUtil.enableShortCircuitShmTracing();
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
Configuration conf = createShortCircuitConf("testShmBasedStaleness", sockDir);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
final ShortCircuitCache cache =
fs.getClient().getClientContext().getShortCircuitCache();
String TEST_FILE = "/test_file";
final int TEST_FILE_LEN = 8193;
final int SEED = 0xFADED;
DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN,
(short)1, SEED);
FSDataInputStream fis = fs.open(new Path(TEST_FILE));
int first = fis.read();
final ExtendedBlock block =
DFSTestUtil.getFirstBlock(fs, new Path(TEST_FILE));
Assert.assertTrue(first != -1);
cache.accept(new CacheVisitor() {
@Override
public void visit(int numOutstandingMmaps,
Map<ExtendedBlockId, ShortCircuitReplica> replicas,
Map<ExtendedBlockId, InvalidToken> failedLoads,
Map<Long, ShortCircuitReplica> evictable,
Map<Long, ShortCircuitReplica> evictableMmapped) {
ShortCircuitReplica replica = replicas.get(
ExtendedBlockId.fromExtendedBlock(block));
Assert.assertNotNull(replica);
Assert.assertTrue(replica.getSlot().isValid());
}
});
    // Stop the DataNode. This will close the socket keeping the client's
    // shared memory segment alive, and make it stale.
cluster.getDataNodes().get(0).shutdown();
cache.accept(new CacheVisitor() {
@Override
public void visit(int numOutstandingMmaps,
Map<ExtendedBlockId, ShortCircuitReplica> replicas,
Map<ExtendedBlockId, InvalidToken> failedLoads,
Map<Long, ShortCircuitReplica> evictable,
Map<Long, ShortCircuitReplica> evictableMmapped) {
ShortCircuitReplica replica = replicas.get(
ExtendedBlockId.fromExtendedBlock(block));
Assert.assertNotNull(replica);
Assert.assertFalse(replica.getSlot().isValid());
}
});
cluster.shutdown();
sockDir.close();
}
/**
* Test unlinking a file whose blocks we are caching in the DFSClient.
* The DataNode will notify the DFSClient that the replica is stale via the
* ShortCircuitShm.
*/
@Test(timeout=60000)
public void testUnlinkingReplicasInFileDescriptorCache() throws Exception {
BlockReaderTestUtil.enableShortCircuitShmTracing();
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
Configuration conf = createShortCircuitConf(
"testUnlinkingReplicasInFileDescriptorCache", sockDir);
// We don't want the CacheCleaner to time out short-circuit shared memory
// segments during the test, so set the timeout really high.
conf.setLong(HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_KEY,
1000000000L);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
final ShortCircuitCache cache =
fs.getClient().getClientContext().getShortCircuitCache();
cache.getDfsClientShmManager().visit(new Visitor() {
@Override
public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
throws IOException {
// The ClientShmManager starts off empty.
Assert.assertEquals(0, info.size());
}
});
final Path TEST_PATH = new Path("/test_file");
final int TEST_FILE_LEN = 8193;
final int SEED = 0xFADE0;
DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LEN,
(short)1, SEED);
byte contents[] = DFSTestUtil.readFileBuffer(fs, TEST_PATH);
byte expected[] = DFSTestUtil.
calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents, expected));
// Loading this file brought the ShortCircuitReplica into our local
// replica cache.
final DatanodeInfo datanode =
new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
cache.getDfsClientShmManager().visit(new Visitor() {
@Override
public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
throws IOException {
Assert.assertTrue(info.get(datanode).full.isEmpty());
Assert.assertFalse(info.get(datanode).disabled);
Assert.assertEquals(1, info.get(datanode).notFull.values().size());
DfsClientShm shm =
info.get(datanode).notFull.values().iterator().next();
Assert.assertFalse(shm.isDisconnected());
}
});
// Remove the file whose blocks we just read.
fs.delete(TEST_PATH, false);
// Wait for the replica to be purged from the DFSClient's cache.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
MutableBoolean done = new MutableBoolean(true);
@Override
public Boolean get() {
try {
done.setValue(true);
cache.getDfsClientShmManager().visit(new Visitor() {
@Override
public void visit(HashMap<DatanodeInfo,
PerDatanodeVisitorInfo> info) throws IOException {
Assert.assertTrue(info.get(datanode).full.isEmpty());
Assert.assertFalse(info.get(datanode).disabled);
Assert.assertEquals(1,
info.get(datanode).notFull.values().size());
DfsClientShm shm = info.get(datanode).notFull.values().
iterator().next();
// Check that all slots have been invalidated.
for (Iterator<Slot> iter = shm.slotIterator();
iter.hasNext(); ) {
Slot slot = iter.next();
if (slot.isValid()) {
done.setValue(false);
}
}
}
});
} catch (IOException e) {
LOG.error("error running visitor", e);
}
return done.booleanValue();
}
}, 10, 60000);
cluster.shutdown();
sockDir.close();
}
  private static void checkNumberOfSegmentsAndSlots(final int expectedSegments,
      final int expectedSlots, ShortCircuitRegistry registry) {
registry.visit(new ShortCircuitRegistry.Visitor() {
@Override
public void accept(HashMap<ShmId, RegisteredShm> segments,
HashMultimap<ExtendedBlockId, Slot> slots) {
Assert.assertEquals(expectedSegments, segments.size());
Assert.assertEquals(expectedSlots, slots.size());
}
});
}
public static class TestCleanupFailureInjector
extends BlockReaderFactory.FailureInjector {
@Override
public void injectRequestFileDescriptorsFailure() throws IOException {
throw new IOException("injected I/O error");
}
}
// Regression test for HDFS-7915
@Test(timeout=60000)
public void testDataXceiverCleansUpSlotsOnFailure() throws Exception {
BlockReaderTestUtil.enableShortCircuitShmTracing();
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
Configuration conf = createShortCircuitConf(
"testDataXceiverCleansUpSlotsOnFailure", sockDir);
conf.setLong(
HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_KEY,
1000000000L);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
final Path TEST_PATH1 = new Path("/test_file1");
final Path TEST_PATH2 = new Path("/test_file2");
final int TEST_FILE_LEN = 4096;
final int SEED = 0xFADE1;
DFSTestUtil.createFile(fs, TEST_PATH1, TEST_FILE_LEN,
(short)1, SEED);
DFSTestUtil.createFile(fs, TEST_PATH2, TEST_FILE_LEN,
(short)1, SEED);
// The first read should allocate one shared memory segment and slot.
DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
// The second read should fail, and we should only have 1 segment and 1 slot
// left.
fs.getClient().getConf().getShortCircuitConf().brfFailureInjector =
new TestCleanupFailureInjector();
try {
DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
} catch (Throwable t) {
GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
"testing, but we failed to do a non-TCP read.", t);
}
checkNumberOfSegmentsAndSlots(1, 1,
cluster.getDataNodes().get(0).getShortCircuitRegistry());
cluster.shutdown();
sockDir.close();
}
// Regression test for HADOOP-11802
@Test(timeout=60000)
public void testDataXceiverHandlesRequestShortCircuitShmFailure()
throws Exception {
BlockReaderTestUtil.enableShortCircuitShmTracing();
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
Configuration conf = createShortCircuitConf(
"testDataXceiverHandlesRequestShortCircuitShmFailure", sockDir);
conf.setLong(HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_KEY,
1000000000L);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
final Path TEST_PATH1 = new Path("/test_file1");
DFSTestUtil.createFile(fs, TEST_PATH1, 4096,
(short)1, 0xFADE1);
LOG.info("Setting failure injector and performing a read which " +
"should fail...");
DataNodeFaultInjector failureInjector = Mockito.mock(DataNodeFaultInjector.class);
Mockito.doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
throw new IOException("injected error into sendShmResponse");
}
}).when(failureInjector).sendShortCircuitShmResponse();
DataNodeFaultInjector prevInjector = DataNodeFaultInjector.instance;
DataNodeFaultInjector.instance = failureInjector;
try {
// The first read will try to allocate a shared memory segment and slot.
// The shared memory segment allocation will fail because of the failure
// injector.
DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
Assert.fail("expected readFileBuffer to fail, but it succeeded.");
} catch (Throwable t) {
GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
"testing, but we failed to do a non-TCP read.", t);
}
checkNumberOfSegmentsAndSlots(0, 0,
cluster.getDataNodes().get(0).getShortCircuitRegistry());
LOG.info("Clearing failure injector and performing another read...");
DataNodeFaultInjector.instance = prevInjector;
fs.getClient().getClientContext().getDomainSocketFactory().clearPathMap();
// The second read should succeed.
DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
// We should have added a new short-circuit shared memory segment and slot.
checkNumberOfSegmentsAndSlots(1, 1,
cluster.getDataNodes().get(0).getShortCircuitRegistry());
cluster.shutdown();
sockDir.close();
}
public static class TestPreReceiptVerificationFailureInjector
extends BlockReaderFactory.FailureInjector {
@Override
public boolean getSupportsReceiptVerification() {
return false;
}
}
// Regression test for HDFS-8070
@Test(timeout=60000)
public void testPreReceiptVerificationDfsClientCanDoScr() throws Exception {
BlockReaderTestUtil.enableShortCircuitShmTracing();
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
Configuration conf = createShortCircuitConf(
"testPreReceiptVerificationDfsClientCanDoScr", sockDir);
conf.setLong(
HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_KEY,
1000000000L);
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
fs.getClient().getConf().getShortCircuitConf().brfFailureInjector =
new TestPreReceiptVerificationFailureInjector();
final Path TEST_PATH1 = new Path("/test_file1");
DFSTestUtil.createFile(fs, TEST_PATH1, 4096, (short)1, 0xFADE2);
final Path TEST_PATH2 = new Path("/test_file2");
DFSTestUtil.createFile(fs, TEST_PATH2, 4096, (short)1, 0xFADE2);
DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
ShortCircuitRegistry registry =
cluster.getDataNodes().get(0).getShortCircuitRegistry();
registry.visit(new ShortCircuitRegistry.Visitor() {
@Override
public void accept(HashMap<ShmId, RegisteredShm> segments,
HashMultimap<ExtendedBlockId, Slot> slots) {
Assert.assertEquals(1, segments.size());
Assert.assertEquals(2, slots.size());
}
});
cluster.shutdown();
sockDir.close();
}
}
| 32,124 | 39.613148 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestExactSizeInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import org.junit.Test;
public class TestExactSizeInputStream {
@Test
public void testBasicsReadSingle() throws IOException {
ExactSizeInputStream s = new ExactSizeInputStream(byteStream("hello"), 3);
assertEquals(3, s.available());
assertEquals((int)'h', s.read());
assertEquals((int)'e', s.read());
assertEquals((int)'l', s.read());
assertEquals(-1, s.read());
assertEquals(0, s.available());
}
@Test
public void testBasicsReadArray() throws IOException {
ExactSizeInputStream s = new ExactSizeInputStream(byteStream("hello"), 3);
assertEquals(3, s.available());
byte[] buf = new byte[10];
assertEquals(2, s.read(buf, 0, 2));
assertEquals('h', buf[0]);
assertEquals('e', buf[1]);
assertEquals(1, s.read(buf, 0, 2));
assertEquals('l', buf[0]);
assertEquals(-1, s.read(buf, 0, 2));
}
@Test
public void testBasicsSkip() throws IOException {
ExactSizeInputStream s = new ExactSizeInputStream(byteStream("hello"), 3);
assertEquals(3, s.available());
assertEquals(2, s.skip(2));
assertEquals(1, s.skip(2));
assertEquals(0, s.skip(2));
}
@Test
public void testReadNotEnough() throws IOException {
// Ask for 5 bytes, only has 2
ExactSizeInputStream s = new ExactSizeInputStream(byteStream("he"), 5);
assertEquals(2, s.available());
assertEquals((int)'h', s.read());
assertEquals((int)'e', s.read());
try {
s.read();
fail("Read when should be out of data");
} catch (EOFException e) {
// expected
}
}
@Test
public void testSkipNotEnough() throws IOException {
// Ask for 5 bytes, only has 2
ExactSizeInputStream s = new ExactSizeInputStream(byteStream("he"), 5);
assertEquals(2, s.skip(3));
try {
s.skip(1);
fail("Skip when should be out of data");
} catch (EOFException e) {
// expected
}
}
@Test
public void testReadArrayNotEnough() throws IOException {
// Ask for 5 bytes, only has 2
ExactSizeInputStream s = new ExactSizeInputStream(byteStream("he"), 5);
byte[] buf = new byte[10];
assertEquals(2, s.read(buf, 0, 5));
try {
s.read(buf, 2, 3);
fail("Read buf when should be out of data");
} catch (EOFException e) {
// expected
}
}
@Test
public void testMark() throws IOException {
ExactSizeInputStream s = new ExactSizeInputStream(byteStream("he"), 5);
assertFalse(s.markSupported());
try {
s.mark(1);
fail("Mark should not succeed");
} catch (UnsupportedOperationException uoe) {
// expected
}
}
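  // Illustrative sketch (assumption, not in the original suite): the stream
  // bounds a longer underlying stream, so reads stop at the declared size
  // even though more raw bytes remain.
  @Test
  public void testBoundedWrapSketch() throws IOException {
    ExactSizeInputStream s = new ExactSizeInputStream(byteStream("abcdef"), 4);
    byte[] buf = new byte[4];
    assertEquals(4, s.read(buf, 0, 4));
    // limit reached: EOF reported although the wrapped stream has 2 bytes left
    assertEquals(-1, s.read());
  }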
private static InputStream byteStream(String data) {
return new ByteArrayInputStream(data.getBytes());
}
}
| 3,861 | 28.707692 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.util.Diff;
import org.apache.hadoop.hdfs.util.Diff.Container;
import org.apache.hadoop.hdfs.util.Diff.UndoInfo;
import org.junit.Assert;
import org.junit.Test;
/**
* Test {@link Diff} with {@link INode}.
*/
public class TestDiff {
private static final Random RANDOM = new Random();
private static final int UNDO_TEST_P = 10;
private static final PermissionStatus PERM = PermissionStatus.createImmutable(
"user", "group", FsPermission.createImmutable((short)0));
static int nextStep(int n) {
return n == 0? 1: 10*n;
}
/** Test directory diff. */
@Test(timeout=60000)
public void testDiff() throws Exception {
for(int startSize = 0; startSize <= 10000; startSize = nextStep(startSize)) {
for(int m = 0; m <= 10000; m = nextStep(m)) {
runDiffTest(startSize, m);
}
}
}
  /**
   * The following are the steps of the diff test:
   * 1) Initialize the previous list and add s elements to it,
   *    where s = startSize.
   * 2) Initialize the current list by copying all elements from the
   *    previous list.
   * 3) Initialize an empty diff object.
   * 4) Make m modifications to the current list, where m = numModifications.
   *    Record the modifications in diff at the same time.
   * 5) Test if current == previous + diff and previous == current - diff.
   * 6) Test accessPrevious and accessCurrent.
   *
   * @param startSize
   * @param numModifications
   */
void runDiffTest(int startSize, int numModifications) {
final int width = findWidth(startSize + numModifications);
System.out.println("\nstartSize=" + startSize
+ ", numModifications=" + numModifications
+ ", width=" + width);
// initialize previous
final List<INode> previous = new ArrayList<INode>();
int n = 0;
for(; n < startSize; n++) {
previous.add(newINode(n, width));
}
// make modifications to current and record the diff
final List<INode> current = new ArrayList<INode>(previous);
final List<Diff<byte[], INode>> diffs =
new ArrayList<Diff<byte[], INode>>();
for(int j = 0; j < 5; j++) {
diffs.add(new Diff<byte[], INode>());
}
for(int m = 0; m < numModifications; m++) {
final int j = m * diffs.size() / numModifications;
// if current is empty, the next operation must be create;
// otherwise, randomly pick an operation.
final int nextOperation = current.isEmpty()? 1: RANDOM.nextInt(3) + 1;
switch(nextOperation) {
case 1: // create
{
final INode i = newINode(n++, width);
create(i, current, diffs.get(j));
break;
}
case 2: // delete
{
final INode i = current.get(RANDOM.nextInt(current.size()));
delete(i, current, diffs.get(j));
break;
}
case 3: // modify
{
final INode i = current.get(RANDOM.nextInt(current.size()));
modify(i, current, diffs.get(j));
break;
}
}
}
{
// check if current == previous + diffs
List<INode> c = previous;
for(int i = 0; i < diffs.size(); i++) {
c = diffs.get(i).apply2Previous(c);
}
if (!hasIdenticalElements(current, c)) {
System.out.println("previous = " + previous);
System.out.println();
System.out.println("current = " + current);
System.out.println("c = " + c);
throw new AssertionError("current and c are not identical.");
}
// check if previous == current - diffs
List<INode> p = current;
for(int i = diffs.size() - 1; i >= 0; i--) {
p = diffs.get(i).apply2Current(p);
}
if (!hasIdenticalElements(previous, p)) {
System.out.println("previous = " + previous);
System.out.println("p = " + p);
System.out.println();
System.out.println("current = " + current);
throw new AssertionError("previous and p are not identical.");
}
}
// combine all diffs
final Diff<byte[], INode> combined = diffs.get(0);
for(int i = 1; i < diffs.size(); i++) {
combined.combinePosterior(diffs.get(i), null);
}
{
// check if current == previous + combined
final List<INode> c = combined.apply2Previous(previous);
if (!hasIdenticalElements(current, c)) {
System.out.println("previous = " + previous);
System.out.println();
System.out.println("current = " + current);
System.out.println("c = " + c);
throw new AssertionError("current and c are not identical.");
}
// check if previous == current - combined
final List<INode> p = combined.apply2Current(current);
if (!hasIdenticalElements(previous, p)) {
System.out.println("previous = " + previous);
System.out.println("p = " + p);
System.out.println();
System.out.println("current = " + current);
throw new AssertionError("previous and p are not identical.");
}
}
{
for(int m = 0; m < n; m++) {
final INode inode = newINode(m, width);
{// test accessPrevious
final Container<INode> r = combined.accessPrevious(inode.getKey());
final INode computed;
if (r != null) {
computed = r.getElement();
} else {
final int i = Diff.search(current, inode.getKey());
computed = i < 0? null: current.get(i);
}
final int j = Diff.search(previous, inode.getKey());
final INode expected = j < 0? null: previous.get(j);
// must be the same object (equals is not enough)
Assert.assertTrue(computed == expected);
}
{// test accessCurrent
final Container<INode> r = combined.accessCurrent(inode.getKey());
final INode computed;
if (r != null) {
computed = r.getElement();
} else {
final int i = Diff.search(previous, inode.getKey());
computed = i < 0? null: previous.get(i);
}
final int j = Diff.search(current, inode.getKey());
final INode expected = j < 0? null: current.get(j);
// must be the same object (equals is not enough)
Assert.assertTrue(computed == expected);
}
}
}
}
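  // Minimal sketch (assumption, not part of the original suite): recording a
  // single create in a Diff turns an empty previous list into a one-element
  // current list via apply2Previous.
  @Test(timeout=60000)
  public void testSingleCreateSketch() {
    final Diff<byte[], INode> diff = new Diff<byte[], INode>();
    final List<INode> previous = new ArrayList<INode>();
    final INode inode = newINode(0, 1);
    diff.create(inode);
    final List<INode> current = diff.apply2Previous(previous);
    Assert.assertEquals(1, current.size());
    // must be the same object (equals is not enough)
    Assert.assertTrue(current.get(0) == inode);
  }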
static boolean hasIdenticalElements(final List<INode> expected,
final List<INode> computed) {
if (expected == null) {
return computed == null;
}
if (expected.size() != computed.size()) {
return false;
}
for(int i = 0; i < expected.size(); i++) {
// must be the same object (equals is not enough)
if (expected.get(i) != computed.get(i)) {
return false;
}
}
return true;
}
static String toString(INode inode) {
return inode == null? null
: inode.getLocalName() + ":" + inode.getModificationTime();
}
static int findWidth(int max) {
int w = 1;
for(long n = 10; n < max; n *= 10, w++);
return w;
}
static INode newINode(int n, int width) {
byte[] name = DFSUtil.string2Bytes(String.format("n%0" + width + "d", n));
return new INodeDirectory(n, name, PERM, 0L);
}
static void create(INode inode, final List<INode> current,
Diff<byte[], INode> diff) {
final int i = Diff.search(current, inode.getKey());
Assert.assertTrue(i < 0);
current.add(-i - 1, inode);
if (diff != null) {
//test undo with 1/UNDO_TEST_P probability
final boolean testUndo = RANDOM.nextInt(UNDO_TEST_P) == 0;
String before = null;
if (testUndo) {
before = diff.toString();
}
final int undoInfo = diff.create(inode);
if (testUndo) {
final String after = diff.toString();
//undo
diff.undoCreate(inode, undoInfo);
assertDiff(before, diff);
//re-do
diff.create(inode);
assertDiff(after, diff);
}
}
}
static void delete(INode inode, final List<INode> current,
Diff<byte[], INode> diff) {
final int i = Diff.search(current, inode.getKey());
current.remove(i);
if (diff != null) {
//test undo with 1/UNDO_TEST_P probability
final boolean testUndo = RANDOM.nextInt(UNDO_TEST_P) == 0;
String before = null;
if (testUndo) {
before = diff.toString();
}
final UndoInfo<INode> undoInfo = diff.delete(inode);
if (testUndo) {
final String after = diff.toString();
//undo
diff.undoDelete(inode, undoInfo);
assertDiff(before, diff);
//re-do
diff.delete(inode);
assertDiff(after, diff);
}
}
}
static void modify(INode inode, final List<INode> current,
Diff<byte[], INode> diff) {
final int i = Diff.search(current, inode.getKey());
Assert.assertTrue(i >= 0);
final INodeDirectory oldinode = (INodeDirectory)current.get(i);
final INodeDirectory newinode = new INodeDirectory(oldinode, false,
oldinode.getFeatures());
newinode.setModificationTime(oldinode.getModificationTime() + 1);
current.set(i, newinode);
if (diff != null) {
//test undo with 1/UNDO_TEST_P probability
final boolean testUndo = RANDOM.nextInt(UNDO_TEST_P) == 0;
String before = null;
if (testUndo) {
before = diff.toString();
}
final UndoInfo<INode> undoInfo = diff.modify(oldinode, newinode);
if (testUndo) {
final String after = diff.toString();
//undo
diff.undoModify(oldinode, newinode, undoInfo);
assertDiff(before, diff);
//re-do
diff.modify(oldinode, newinode);
assertDiff(after, diff);
}
}
}
static void assertDiff(String s, Diff<byte[], INode> diff) {
Assert.assertEquals(s, diff.toString());
}
}
| 11,056 | 31.616519 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestBestEffortLongFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
import java.io.File;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestBestEffortLongFile {
private static final File FILE = new File(MiniDFSCluster.getBaseDirectory() +
File.separatorChar + "TestBestEffortLongFile");
@Before
public void cleanup() {
if (FILE.exists()) {
assertTrue(FILE.delete());
}
FILE.getParentFile().mkdirs();
}
@Test
public void testGetSet() throws IOException {
BestEffortLongFile f = new BestEffortLongFile(FILE, 12345L);
try {
// Before the file exists, should return default.
assertEquals(12345L, f.get());
// And first access should open it.
assertTrue(FILE.exists());
Random r = new Random();
for (int i = 0; i < 100; i++) {
long newVal = r.nextLong();
// Changing the value should be reflected in the next get() call.
f.set(newVal);
assertEquals(newVal, f.get());
        // And should be reflected in a new instance (i.e. it actually got
// written to the file)
BestEffortLongFile f2 = new BestEffortLongFile(FILE, 999L);
try {
assertEquals(newVal, f2.get());
} finally {
IOUtils.closeStream(f2);
}
}
} finally {
IOUtils.closeStream(f);
}
}
@Test
public void testTruncatedFileReturnsDefault() throws IOException {
assertTrue(FILE.createNewFile());
assertEquals(0, FILE.length());
BestEffortLongFile f = new BestEffortLongFile(FILE, 12345L);
try {
assertEquals(12345L, f.get());
} finally {
f.close();
}
}
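  // Illustrative sketch (assumption, not in the original suite): a value set
  // before close() is visible to a fresh instance, which is the persistence
  // contract testGetSet relies on.
  @Test
  public void testValueSurvivesCloseSketch() throws IOException {
    BestEffortLongFile f = new BestEffortLongFile(FILE, 0L);
    try {
      f.set(42L);
    } finally {
      f.close();
    }
    BestEffortLongFile f2 = new BestEffortLongFile(FILE, 0L);
    try {
      assertEquals(42L, f2.get());
    } finally {
      IOUtils.closeStream(f2);
    }
  }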
}
| 2,631 | 29.252874 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestByteArrayManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.util.ByteArrayManager.Counter;
import org.apache.hadoop.hdfs.util.ByteArrayManager.CounterMap;
import org.apache.hadoop.hdfs.util.ByteArrayManager.FixedLengthManager;
import org.apache.hadoop.hdfs.util.ByteArrayManager.ManagerMap;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
/**
* Test {@link ByteArrayManager}.
*/
public class TestByteArrayManager {
static {
((Log4JLogger)LogFactory.getLog(ByteArrayManager.class)
).getLogger().setLevel(Level.ALL);
}
static final Log LOG = LogFactory.getLog(TestByteArrayManager.class);
private static final Comparator<Future<Integer>> CMP = new Comparator<Future<Integer>>() {
@Override
public int compare(Future<Integer> left, Future<Integer> right) {
try {
return left.get().intValue() - right.get().intValue();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
};
@Test
public void testCounter() throws Exception {
final long countResetTimePeriodMs = 200L;
final Counter c = new Counter(countResetTimePeriodMs);
final int n = ThreadLocalRandom.current().nextInt(512) + 512;
final List<Future<Integer>> futures = new ArrayList<Future<Integer>>(n);
final ExecutorService pool = Executors.newFixedThreadPool(32);
try {
// increment
for(int i = 0; i < n; i++) {
futures.add(pool.submit(new Callable<Integer>() {
@Override
public Integer call() throws Exception {
return (int)c.increment();
}
}));
}
// sort and wait for the futures
Collections.sort(futures, CMP);
} finally {
pool.shutdown();
}
// check futures
Assert.assertEquals(n, futures.size());
for(int i = 0; i < n; i++) {
Assert.assertEquals(i + 1, futures.get(i).get().intValue());
}
Assert.assertEquals(n, c.getCount());
// test auto-reset
Thread.sleep(countResetTimePeriodMs + 100);
Assert.assertEquals(1, c.increment());
}
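  // Minimal single-threaded sketch (assumption, mirrors testCounter above):
  // once the reset period elapses with no increments, the count starts over.
  @Test
  public void testCounterResetSketch() throws Exception {
    final long resetPeriodMs = 100L;
    final Counter c = new Counter(resetPeriodMs);
    Assert.assertEquals(1, c.increment());
    Assert.assertEquals(2, c.increment());
    Thread.sleep(resetPeriodMs + 100);
    Assert.assertEquals(1, c.increment());
  }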
@Test
public void testAllocateRecycle() throws Exception {
final int countThreshold = 4;
final int countLimit = 8;
final long countResetTimePeriodMs = 200L;
final ByteArrayManager.Impl bam = new ByteArrayManager.Impl(
new ByteArrayManager.Conf(
countThreshold, countLimit, countResetTimePeriodMs));
final CounterMap counters = bam.getCounters();
final ManagerMap managers = bam.getManagers();
final int[] uncommonArrays = {0, 1, 2, 4, 8, 16, 32, 64};
final int arrayLength = 1024;
final Allocator allocator = new Allocator(bam);
final Recycler recycler = new Recycler(bam);
try {
{ // allocate within threshold
for(int i = 0; i < countThreshold; i++) {
allocator.submit(arrayLength);
}
waitForAll(allocator.futures);
Assert.assertEquals(countThreshold,
counters.get(arrayLength, false).getCount());
Assert.assertNull(managers.get(arrayLength, false));
for(int n : uncommonArrays) {
Assert.assertNull(counters.get(n, false));
Assert.assertNull(managers.get(n, false));
}
}
{ // recycle half of the arrays
for(int i = 0; i < countThreshold/2; i++) {
recycler.submit(removeLast(allocator.futures).get());
}
for(Future<Integer> f : recycler.futures) {
Assert.assertEquals(-1, f.get().intValue());
}
recycler.futures.clear();
}
{ // allocate one more
allocator.submit(arrayLength).get();
Assert.assertEquals(countThreshold + 1, counters.get(arrayLength, false).getCount());
Assert.assertNotNull(managers.get(arrayLength, false));
}
{ // recycle the remaining arrays
final int n = allocator.recycleAll(recycler);
recycler.verify(n);
}
{
// allocate until the maximum.
for(int i = 0; i < countLimit; i++) {
allocator.submit(arrayLength);
}
waitForAll(allocator.futures);
// allocating one more should be blocked
final AllocatorThread t = new AllocatorThread(arrayLength, bam);
t.start();
// check that the thread is runnable, waiting, or timed-waiting
for(int i = 0; i < 5; i++) {
Thread.sleep(100);
final Thread.State threadState = t.getState();
if (threadState != Thread.State.RUNNABLE
&& threadState != Thread.State.WAITING
&& threadState != Thread.State.TIMED_WAITING) {
Assert.fail("threadState = " + threadState);
}
}
// recycle an array
recycler.submit(removeLast(allocator.futures).get());
Assert.assertEquals(1, removeLast(recycler.futures).get().intValue());
// check if the thread is unblocked
Thread.sleep(100);
Assert.assertEquals(Thread.State.TERMINATED, t.getState());
// recycle the remaining arrays; the free queue should then be full.
Assert.assertEquals(countLimit-1, allocator.recycleAll(recycler));
recycler.submit(t.array);
recycler.verify(countLimit);
// recycle one more; it should not increase the free queue size
Assert.assertEquals(countLimit, bam.release(new byte[arrayLength]));
}
} finally {
allocator.pool.shutdown();
recycler.pool.shutdown();
}
}
static <T> Future<T> removeLast(List<Future<T>> futures) throws Exception {
return remove(futures, futures.size() - 1);
}
static <T> Future<T> remove(List<Future<T>> futures, int i) throws Exception {
return futures.isEmpty()? null: futures.remove(i);
}
static <T> void waitForAll(List<Future<T>> futures) throws Exception {
for(Future<T> f : futures) {
f.get();
}
}
static class AllocatorThread extends Thread {
private final ByteArrayManager bam;
private final int arrayLength;
private byte[] array;
AllocatorThread(int arrayLength, ByteArrayManager bam) {
this.bam = bam;
this.arrayLength = arrayLength;
}
@Override
public void run() {
try {
array = bam.newByteArray(arrayLength);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
static class Allocator {
private final ByteArrayManager bam;
final ExecutorService pool = Executors.newFixedThreadPool(8);
final List<Future<byte[]>> futures = new LinkedList<Future<byte[]>>();
Allocator(ByteArrayManager bam) {
this.bam = bam;
}
Future<byte[]> submit(final int arrayLength) {
final Future<byte[]> f = pool.submit(new Callable<byte[]>() {
@Override
public byte[] call() throws Exception {
final byte[] array = bam.newByteArray(arrayLength);
Assert.assertEquals(arrayLength, array.length);
return array;
}
});
futures.add(f);
return f;
}
int recycleAll(Recycler recycler) throws Exception {
final int n = futures.size();
for(Future<byte[]> f : futures) {
recycler.submit(f.get());
}
futures.clear();
return n;
}
}
static class Recycler {
private final ByteArrayManager bam;
final ExecutorService pool = Executors.newFixedThreadPool(8);
final List<Future<Integer>> futures = new LinkedList<Future<Integer>>();
Recycler(ByteArrayManager bam) {
this.bam = bam;
}
Future<Integer> submit(final byte[] array) {
final Future<Integer> f = pool.submit(new Callable<Integer>() {
@Override
public Integer call() throws Exception {
return bam.release(array);
}
});
futures.add(f);
return f;
}
void verify(final int expectedSize) throws Exception {
Assert.assertEquals(expectedSize, futures.size());
Collections.sort(futures, CMP);
for(int i = 0; i < futures.size(); i++) {
Assert.assertEquals(i+1, futures.get(i).get().intValue());
}
futures.clear();
}
}
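// Concurrency stress test: each runner thread randomly allocates and
// recycles while a separate thread recycles from random runners. Note that
// runner 0 is constructed with p == 0 and therefore never allocates, which
// is why no counter is expected for it and verification starts at index 1.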
@Test
public void testByteArrayManager() throws Exception {
final int countThreshold = 32;
final int countLimit = 64;
final long countResetTimePeriodMs = 1000L;
final ByteArrayManager.Impl bam = new ByteArrayManager.Impl(
new ByteArrayManager.Conf(
countThreshold, countLimit, countResetTimePeriodMs));
final CounterMap counters = bam.getCounters();
final ManagerMap managers = bam.getManagers();
final ExecutorService pool = Executors.newFixedThreadPool(128);
final Runner[] runners = new Runner[Runner.NUM_RUNNERS];
final Thread[] threads = new Thread[runners.length];
final int num = 1 << 10;
for(int i = 0; i < runners.length; i++) {
runners[i] = new Runner(i, countThreshold, countLimit, pool, i, bam);
threads[i] = runners[i].start(num);
}
final List<Exception> exceptions = new ArrayList<Exception>();
final Thread randomRecycler = new Thread() {
@Override
public void run() {
LOG.info("randomRecycler start");
for(int i = 0; shouldRun(); i++) {
final int j = ThreadLocalRandom.current().nextInt(runners.length);
try {
runners[j].recycle();
} catch (Exception e) {
e.printStackTrace();
exceptions.add(new Exception(this + " has an exception", e));
}
if ((i & 0xFF) == 0) {
LOG.info("randomRecycler sleep, i=" + i);
sleepMs(100);
}
}
LOG.info("randomRecycler done");
}
boolean shouldRun() {
for(int i = 0; i < runners.length; i++) {
if (threads[i].isAlive()) {
return true;
}
if (!runners[i].isEmpty()) {
return true;
}
}
return false;
}
};
randomRecycler.start();
randomRecycler.join();
Assert.assertTrue(exceptions.isEmpty());
Assert.assertNull(counters.get(0, false));
for(int i = 1; i < runners.length; i++) {
if (!runners[i].assertionErrors.isEmpty()) {
for(AssertionError e : runners[i].assertionErrors) {
LOG.error("AssertionError " + i, e);
}
Assert.fail(runners[i].assertionErrors.size() + " AssertionError(s)");
}
final int arrayLength = Runner.index2arrayLength(i);
final boolean exceedCountThreshold = counters.get(arrayLength, false).getCount() > countThreshold;
final FixedLengthManager m = managers.get(arrayLength, false);
if (exceedCountThreshold) {
Assert.assertNotNull(m);
} else {
Assert.assertNull(m);
}
}
}
static void sleepMs(long ms) {
try {
Thread.sleep(ms);
} catch (InterruptedException e) {
e.printStackTrace();
Assert.fail("Sleep is interrupted: " + e);
}
}
static class Runner implements Runnable {
static final int NUM_RUNNERS = 5;
static int index2arrayLength(int index) {
return ByteArrayManager.MIN_ARRAY_LENGTH << (index - 1);
}
private final ByteArrayManager bam;
final int maxArrayLength;
final int countThreshold;
final int maxArrays;
final ExecutorService pool;
final List<Future<byte[]>> arrays = new ArrayList<Future<byte[]>>();
final AtomicInteger count = new AtomicInteger();
final int p;
private int n;
final List<AssertionError> assertionErrors = new ArrayList<AssertionError>();
Runner(int index, int countThreshold, int maxArrays,
ExecutorService pool, int p, ByteArrayManager bam) {
this.maxArrayLength = index2arrayLength(index);
this.countThreshold = countThreshold;
this.maxArrays = maxArrays;
this.pool = pool;
this.p = p;
this.bam = bam;
}
boolean isEmpty() {
synchronized (arrays) {
return arrays.isEmpty();
}
}
Future<byte[]> submitAllocate() {
count.incrementAndGet();
final Future<byte[]> f = pool.submit(new Callable<byte[]>() {
@Override
public byte[] call() throws Exception {
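// Request a length in (maxArrayLength/2, maxArrayLength], or
// [1, MIN_ARRAY_LENGTH] for the smallest runner; the manager is expected
// to round the allocation up to maxArrayLength, which the assertion below
// verifies.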
final int lower = maxArrayLength == ByteArrayManager.MIN_ARRAY_LENGTH?
0: maxArrayLength >> 1;
final int arrayLength = ThreadLocalRandom.current().nextInt(
maxArrayLength - lower) + lower + 1;
final byte[] array = bam.newByteArray(arrayLength);
try {
Assert.assertEquals("arrayLength=" + arrayLength + ", lower=" + lower,
maxArrayLength, array.length);
} catch(AssertionError e) {
assertionErrors.add(e);
}
return array;
}
});
synchronized (arrays) {
arrays.add(f);
}
return f;
}
Future<byte[]> removeFirst() throws Exception {
synchronized (arrays) {
return remove(arrays, 0);
}
}
void recycle() throws Exception {
final Future<byte[]> f = removeFirst();
if (f != null) {
printf("randomRecycler: ");
try {
recycle(f.get(10, TimeUnit.MILLISECONDS));
} catch(TimeoutException e) {
recycle(new byte[maxArrayLength]);
printf("timeout, new byte[%d]\n", maxArrayLength);
}
}
}
int recycle(final byte[] array) {
return bam.release(array);
}
Future<Integer> submitRecycle(final byte[] array) {
count.decrementAndGet();
final Future<Integer> f = pool.submit(new Callable<Integer>() {
@Override
public Integer call() throws Exception {
return recycle(array);
}
});
return f;
}
@Override
public void run() {
for(int i = 0; i < n; i++) {
final boolean isAllocate = ThreadLocalRandom.current()
.nextInt(NUM_RUNNERS) < p;
if (isAllocate) {
submitAllocate();
} else {
try {
final Future<byte[]> f = removeFirst();
if (f != null) {
submitRecycle(f.get());
}
} catch (Exception e) {
e.printStackTrace();
Assert.fail(this + " has " + e);
}
}
if ((i & 0xFF) == 0) {
sleepMs(100);
}
}
}
Thread start(int n) {
this.n = n;
final Thread t = new Thread(this);
t.start();
return t;
}
@Override
public String toString() {
return getClass().getSimpleName() + ": max=" + maxArrayLength
+ ", count=" + count;
}
}
static class NewByteArrayWithLimit extends ByteArrayManager {
private final int maxCount;
private int count = 0;
NewByteArrayWithLimit(int maxCount) {
this.maxCount = maxCount;
}
@Override
public synchronized byte[] newByteArray(int size) throws InterruptedException {
while (count >= maxCount) {
wait();
}
count++;
return new byte[size];
}
@Override
public synchronized int release(byte[] array) {
if (count == maxCount) {
notifyAll();
}
count--;
return 0;
}
}
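// Micro-benchmark (run via main, not JUnit): compares plain unpooled
// allocation, the simple synchronized limit above, and the pooled
// ByteArrayManager.Impl. Each trial submits nAllocations tasks that
// allocate, sleep up to 99 ms, and release, then prints elapsed times
// and percentage differences.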
public static void main(String[] args) throws Exception {
((Log4JLogger)LogFactory.getLog(ByteArrayManager.class)
).getLogger().setLevel(Level.OFF);
final int arrayLength = 64 * 1024; //64k
final int nThreads = 512;
final int nAllocations = 1 << 15;
final int maxArrays = 1 << 10;
final int nTrials = 5;
System.out.println("arrayLength=" + arrayLength
+ ", nThreads=" + nThreads
+ ", nAllocations=" + nAllocations
+ ", maxArrays=" + maxArrays);
final ByteArrayManager[] impls = {
new ByteArrayManager.NewByteArrayWithoutLimit(),
new NewByteArrayWithLimit(maxArrays),
new ByteArrayManager.Impl(new ByteArrayManager.Conf(
HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_THRESHOLD_DEFAULT,
maxArrays,
HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_DEFAULT))
};
final double[] avg = new double[impls.length];
for(int i = 0; i < impls.length; i++) {
double duration = 0;
printf("%26s:", impls[i].getClass().getSimpleName());
for(int j = 0; j < nTrials; j++) {
final int[] sleepTime = new int[nAllocations];
for(int k = 0; k < sleepTime.length; k++) {
sleepTime[k] = ThreadLocalRandom.current().nextInt(100);
}
final long elapsed = performanceTest(arrayLength, maxArrays, nThreads,
sleepTime, impls[i]);
duration += elapsed;
printf("%5d, ", elapsed);
}
avg[i] = duration/nTrials;
printf("avg=%6.3fs", avg[i]/1000);
for(int j = 0; j < i; j++) {
printf(" (%6.2f%%)", percentageDiff(avg[j], avg[i]));
}
printf("\n");
}
}
static double percentageDiff(double original, double newValue) {
return (newValue - original)/original*100;
}
static void printf(String format, Object... args) {
System.out.printf(format, args);
System.out.flush();
}
static long performanceTest(final int arrayLength, final int maxArrays,
final int nThreads, final int[] sleepTimeMSs, final ByteArrayManager impl)
throws Exception {
final ExecutorService pool = Executors.newFixedThreadPool(nThreads);
final List<Future<Void>> futures = new ArrayList<Future<Void>>(sleepTimeMSs.length);
final long startTime = Time.monotonicNow();
for(int i = 0; i < sleepTimeMSs.length; i++) {
final long sleepTime = sleepTimeMSs[i];
futures.add(pool.submit(new Callable<Void>() {
@Override
public Void call() throws Exception {
byte[] array = impl.newByteArray(arrayLength);
sleepMs(sleepTime);
impl.release(array);
return null;
}
}));
}
for(Future<Void> f : futures) {
f.get();
}
final long endTime = Time.monotonicNow();
pool.shutdown();
return endTime - startTime;
}
}
| 19,780 | 29.573416 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Time;
import org.junit.Before;
import org.junit.Test;
public class TestLightWeightHashSet {
private static final Log LOG = LogFactory
.getLog("org.apache.hadoop.hdfs.TestLightWeightHashSet");
private final ArrayList<Integer> list = new ArrayList<Integer>();
private final int NUM = 100;
private LightWeightHashSet<Integer> set;
private Random rand;
@Before
public void setUp() {
float maxF = LightWeightHashSet.DEFAULT_MAX_LOAD_FACTOR;
float minF = LightWeightHashSet.DEFAUT_MIN_LOAD_FACTOR;
int initCapacity = LightWeightHashSet.MINIMUM_CAPACITY;
rand = new Random(Time.now());
list.clear();
for (int i = 0; i < NUM; i++) {
list.add(rand.nextInt());
}
set = new LightWeightHashSet<Integer>(initCapacity, maxF, minF);
}
@Test
public void testEmptyBasic() {
LOG.info("Test empty basic");
Iterator<Integer> iter = set.iterator();
// iterator should not have next
assertFalse(iter.hasNext());
assertEquals(0, set.size());
assertTrue(set.isEmpty());
LOG.info("Test empty - DONE");
}
@Test
public void testOneElementBasic() {
LOG.info("Test one element basic");
set.add(list.get(0));
// set should be non-empty
assertEquals(1, set.size());
assertFalse(set.isEmpty());
// iterator should have next
Iterator<Integer> iter = set.iterator();
assertTrue(iter.hasNext());
// next() should return the single element, after which the iterator is exhausted
assertEquals(list.get(0), iter.next());
assertFalse(iter.hasNext());
LOG.info("Test one element basic - DONE");
}
@Test
public void testMultiBasic() {
LOG.info("Test multi element basic");
// add once
for (Integer i : list) {
assertTrue(set.add(i));
}
assertEquals(list.size(), set.size());
// check if the elements are in the set
for (Integer i : list) {
assertTrue(set.contains(i));
}
// add again - should return false each time
for (Integer i : list) {
assertFalse(set.add(i));
}
// check again if the elements are there
for (Integer i : list) {
assertTrue(set.contains(i));
}
Iterator<Integer> iter = set.iterator();
int num = 0;
while (iter.hasNext()) {
Integer next = iter.next();
assertNotNull(next);
assertTrue(list.contains(next));
num++;
}
// check the number of element from the iterator
assertEquals(list.size(), num);
LOG.info("Test multi element basic - DONE");
}
@Test
public void testRemoveOne() {
LOG.info("Test remove one");
assertTrue(set.add(list.get(0)));
assertEquals(1, set.size());
// remove from the head/tail
assertTrue(set.remove(list.get(0)));
assertEquals(0, set.size());
// check the iterator
Iterator<Integer> iter = set.iterator();
assertFalse(iter.hasNext());
// add the element back to the set
assertTrue(set.add(list.get(0)));
assertEquals(1, set.size());
iter = set.iterator();
assertTrue(iter.hasNext());
LOG.info("Test remove one - DONE");
}
@Test
public void testRemoveMulti() {
LOG.info("Test remove multi");
for (Integer i : list) {
assertTrue(set.add(i));
}
for (int i = 0; i < NUM / 2; i++) {
assertTrue(set.remove(list.get(i)));
}
// the deleted elements should not be there
for (int i = 0; i < NUM / 2; i++) {
assertFalse(set.contains(list.get(i)));
}
// the rest should be there
for (int i = NUM / 2; i < NUM; i++) {
assertTrue(set.contains(list.get(i)));
}
LOG.info("Test remove multi - DONE");
}
@Test
public void testRemoveAll() {
LOG.info("Test remove all");
for (Integer i : list) {
assertTrue(set.add(i));
}
for (int i = 0; i < NUM; i++) {
assertTrue(set.remove(list.get(i)));
}
// the deleted elements should not be there
for (int i = 0; i < NUM; i++) {
assertFalse(set.contains(list.get(i)));
}
// iterator should not have next
Iterator<Integer> iter = set.iterator();
assertFalse(iter.hasNext());
assertTrue(set.isEmpty());
LOG.info("Test remove all - DONE");
}
@Test
public void testPollAll() {
LOG.info("Test poll all");
for (Integer i : list) {
assertTrue(set.add(i));
}
// remove all elements by polling
List<Integer> poll = set.pollAll();
assertEquals(0, set.size());
assertTrue(set.isEmpty());
// the deleted elements should not be there
for (int i = 0; i < NUM; i++) {
assertFalse(set.contains(list.get(i)));
}
// we should get all original items
for (Integer i : poll) {
assertTrue(list.contains(i));
}
Iterator<Integer> iter = set.iterator();
assertFalse(iter.hasNext());
LOG.info("Test poll all - DONE");
}
@Test
public void testPollNMulti() {
LOG.info("Test pollN multi");
// use addAll
set.addAll(list);
// poll zero
List<Integer> poll = set.pollN(0);
assertEquals(0, poll.size());
for (Integer i : list) {
assertTrue(set.contains(i));
}
// poll existing elements (less than size)
poll = set.pollN(10);
assertEquals(10, poll.size());
for (Integer i : poll) {
// should be in original items
assertTrue(list.contains(i));
// should not be in the set anymore
assertFalse(set.contains(i));
}
// poll more elements than present
poll = set.pollN(1000);
assertEquals(NUM - 10, poll.size());
for (Integer i : poll) {
// should be in original items
assertTrue(list.contains(i));
}
// set is empty
assertTrue(set.isEmpty());
assertEquals(0, set.size());
LOG.info("Test pollN multi - DONE");
}
@Test
public void testPollNMultiArray() {
LOG.info("Test pollN multi array");
// use addAll
set.addAll(list);
// poll existing elements (less than size)
Integer[] poll = new Integer[10];
poll = set.pollToArray(poll);
assertEquals(10, poll.length);
for (Integer i : poll) {
// should be in original items
assertTrue(list.contains(i));
// should not be in the set anymore
assertFalse(set.contains(i));
}
// poll other elements (more than size)
poll = new Integer[NUM];
poll = set.pollToArray(poll);
assertEquals(NUM - 10, poll.length);
for (int i = 0; i < NUM - 10; i++) {
assertTrue(list.contains(poll[i]));
}
// set is empty
assertTrue(set.isEmpty());
assertEquals(0, set.size());
// //////
set.addAll(list);
// poll existing elements (exactly the size)
poll = new Integer[NUM];
poll = set.pollToArray(poll);
assertTrue(set.isEmpty());
assertEquals(0, set.size());
assertEquals(NUM, poll.length);
for (int i = 0; i < NUM; i++) {
assertTrue(list.contains(poll[i]));
}
// //////
// //////
set.addAll(list);
// poll with a zero-length array - nothing should be removed
poll = new Integer[0];
poll = set.pollToArray(poll);
for (int i = 0; i < NUM; i++) {
assertTrue(set.contains(list.get(i)));
}
assertEquals(0, poll.length);
// //////
LOG.info("Test pollN multi array- DONE");
}
@Test
public void testClear() {
LOG.info("Test clear");
// use addAll
set.addAll(list);
assertEquals(NUM, set.size());
assertFalse(set.isEmpty());
// clear the set
set.clear();
assertEquals(0, set.size());
assertTrue(set.isEmpty());
// iterator should be empty
Iterator<Integer> iter = set.iterator();
assertFalse(iter.hasNext());
LOG.info("Test clear - DONE");
}
@Test
public void testCapacity() {
LOG.info("Test capacity");
float maxF = LightWeightHashSet.DEFAULT_MAX_LOAD_FACTOR;
float minF = LightWeightHashSet.DEFAUT_MIN_LOAD_FACTOR;
// capacity lower than min_capacity
set = new LightWeightHashSet<Integer>(1, maxF, minF);
assertEquals(LightWeightHashSet.MINIMUM_CAPACITY, set.getCapacity());
// capacity not a power of two
set = new LightWeightHashSet<Integer>(30, maxF, minF);
assertEquals(Math.max(LightWeightHashSet.MINIMUM_CAPACITY, 32),
set.getCapacity());
// capacity valid
set = new LightWeightHashSet<Integer>(64, maxF, minF);
assertEquals(Math.max(LightWeightHashSet.MINIMUM_CAPACITY, 64),
set.getCapacity());
// add NUM elements
set.addAll(list);
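// expected capacity: the smallest power of two, at least MINIMUM_CAPACITY,
// that the growth rule below deems sufficient for NUM elements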
int expCap = LightWeightHashSet.MINIMUM_CAPACITY;
while (expCap < NUM && maxF * expCap < NUM)
expCap <<= 1;
assertEquals(expCap, set.getCapacity());
// see if the set shrinks if we remove elements by removing
set.clear();
set.addAll(list);
int toRemove = set.size() - (int) (set.getCapacity() * minF) + 1;
for (int i = 0; i < toRemove; i++) {
set.remove(list.get(i));
}
assertEquals(Math.max(LightWeightHashSet.MINIMUM_CAPACITY, expCap / 2),
set.getCapacity());
LOG.info("Test capacity - DONE");
}
@Test
public void testOther() {
LOG.info("Test other");
// remove all
assertTrue(set.addAll(list));
assertTrue(set.removeAll(list));
assertTrue(set.isEmpty());
// remove sublist
List<Integer> sub = new LinkedList<Integer>();
for (int i = 0; i < 10; i++) {
sub.add(list.get(i));
}
assertTrue(set.addAll(list));
assertTrue(set.removeAll(sub));
assertFalse(set.isEmpty());
assertEquals(NUM - 10, set.size());
for (Integer i : sub) {
assertFalse(set.contains(i));
}
assertFalse(set.containsAll(sub));
// the rest of the elements should be there
List<Integer> sub2 = new LinkedList<Integer>();
for (int i = 10; i < NUM; i++) {
sub2.add(list.get(i));
}
assertTrue(set.containsAll(sub2));
// to array
Integer[] array = set.toArray(new Integer[0]);
assertEquals(NUM - 10, array.length);
for (int i = 0; i < array.length; i++) {
assertTrue(sub2.contains(array[i]));
}
assertEquals(NUM - 10, set.size());
// to array
Object[] array2 = set.toArray();
assertEquals(NUM - 10, array2.length);
for (int i = 0; i < array2.length; i++) {
assertTrue(sub2.contains(array2[i]));
}
LOG.info("Test other - DONE");
}
@Test
public void testGetElement() {
LightWeightHashSet<TestObject> objSet = new LightWeightHashSet<TestObject>();
TestObject objA = new TestObject("object A");
TestObject equalToObjA = new TestObject("object A");
TestObject objB = new TestObject("object B");
objSet.add(objA);
objSet.add(objB);
assertSame(objA, objSet.getElement(objA));
assertSame(objA, objSet.getElement(equalToObjA));
assertSame(objB, objSet.getElement(objB));
assertNull(objSet.getElement(new TestObject("not in set")));
}
/**
* Wrapper class which is used in
* {@link TestLightWeightHashSet#testGetElement()}
*/
private static class TestObject {
private final String value;
public TestObject(String value) {
super();
this.value = value;
}
@Override
public int hashCode() {
return value.hashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj) return true;
if (obj == null) return false;
if (getClass() != obj.getClass())
return false;
TestObject other = (TestObject) obj;
return this.value.equals(other.value);
}
}
}
| 12,763 | 25.985201 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestXMLUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
import org.apache.hadoop.hdfs.util.XMLUtils.UnmanglingError;
import org.junit.Assert;
import org.junit.Test;
public class TestXMLUtils {
private static void testRoundTripImpl(String str, String expectedMangled,
boolean encodeEntityRefs) {
String mangled = XMLUtils.mangleXmlString(str, encodeEntityRefs);
Assert.assertEquals(expectedMangled, mangled);
String unmangled = XMLUtils.unmangleXmlString(mangled, encodeEntityRefs);
Assert.assertEquals(str, unmangled);
}
private static void testRoundTrip(String str, String expectedMangled) {
testRoundTripImpl(str, expectedMangled, false);
}
private static void testRoundTripWithEntityRefs(String str,
String expectedMangled) {
testRoundTripImpl(str, expectedMangled, true);
}
@Test
public void testMangleEmptyString() throws Exception {
testRoundTrip("", "");
}
@Test
public void testMangleVanillaString() throws Exception {
testRoundTrip("abcdef", "abcdef");
}
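// The backslash is the escape character of the mangling scheme, so a
// literal backslash is itself mangled into its code point form "\005c;".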
@Test
public void testMangleStringWithBackSlash() throws Exception {
testRoundTrip("a\\bcdef", "a\\005c;bcdef");
testRoundTrip("\\\\", "\\005c;\\005c;");
}
@Test
public void testMangleStringWithForbiddenCodePoint() throws Exception {
testRoundTrip("a\u0001bcdef", "a\\0001;bcdef");
testRoundTrip("a\u0002\ud800bcdef", "a\\0002;\\d800;bcdef");
}
@Test
public void testInvalidSequence() throws Exception {
try {
XMLUtils.unmangleXmlString("\\000g;foo", false);
Assert.fail("expected an unmangling error");
} catch (UnmanglingError e) {
// pass through
}
try {
XMLUtils.unmangleXmlString("\\0", false);
Assert.fail("expected an unmangling error");
} catch (UnmanglingError e) {
// pass through
}
}
@Test
public void testAddEntityRefs() throws Exception {
testRoundTripWithEntityRefs("The Itchy & Scratchy Show",
"The Itchy & Scratchy Show");
testRoundTripWithEntityRefs("\"He said '1 < 2, but 2 > 1'\"",
""He said '1 < 2, but 2 > 1'"");
testRoundTripWithEntityRefs("\u0001 < \u0002", "\\0001; < \\0002;");
}
}
| 3,050 | 33.280899 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Time;
import org.junit.Before;
import org.junit.Test;
public class TestLightWeightLinkedSet {
private static final Log LOG = LogFactory
.getLog("org.apache.hadoop.hdfs.TestLightWeightLinkedSet");
private final ArrayList<Integer> list = new ArrayList<Integer>();
private final int NUM = 100;
private LightWeightLinkedSet<Integer> set;
private Random rand;
@Before
public void setUp() {
float maxF = LightWeightLinkedSet.DEFAULT_MAX_LOAD_FACTOR;
float minF = LightWeightLinkedSet.DEFAUT_MIN_LOAD_FACTOR;
int initCapacity = LightWeightLinkedSet.MINIMUM_CAPACITY;
rand = new Random(Time.now());
list.clear();
for (int i = 0; i < NUM; i++) {
list.add(rand.nextInt());
}
set = new LightWeightLinkedSet<Integer>(initCapacity, maxF, minF);
}
@Test
public void testEmptyBasic() {
LOG.info("Test empty basic");
Iterator<Integer> iter = set.iterator();
// iterator should not have next
assertFalse(iter.hasNext());
assertEquals(0, set.size());
assertTrue(set.isEmpty());
// poll should return nothing
assertNull(set.pollFirst());
assertEquals(0, set.pollAll().size());
assertEquals(0, set.pollN(10).size());
LOG.info("Test empty - DONE");
}
@Test
public void testOneElementBasic() {
LOG.info("Test one element basic");
set.add(list.get(0));
// set should be non-empty
assertEquals(1, set.size());
assertFalse(set.isEmpty());
// iterator should have next
Iterator<Integer> iter = set.iterator();
assertTrue(iter.hasNext());
// next() should return the single element, after which the iterator is exhausted
assertEquals(list.get(0), iter.next());
assertFalse(iter.hasNext());
LOG.info("Test one element basic - DONE");
}
@Test
public void testMultiBasic() {
LOG.info("Test multi element basic");
// add once
for (Integer i : list) {
assertTrue(set.add(i));
}
assertEquals(list.size(), set.size());
// check if the elements are in the set
for (Integer i : list) {
assertTrue(set.contains(i));
}
// add again - should return false each time
for (Integer i : list) {
assertFalse(set.add(i));
}
// check again if the elements are there
for (Integer i : list) {
assertTrue(set.contains(i));
}
Iterator<Integer> iter = set.iterator();
int num = 0;
while (iter.hasNext()) {
assertEquals(list.get(num++), iter.next());
}
// check the number of element from the iterator
assertEquals(list.size(), num);
LOG.info("Test multi element basic - DONE");
}
@Test
public void testRemoveOne() {
LOG.info("Test remove one");
assertTrue(set.add(list.get(0)));
assertEquals(1, set.size());
// remove from the head/tail
assertTrue(set.remove(list.get(0)));
assertEquals(0, set.size());
// check the iterator
Iterator<Integer> iter = set.iterator();
assertFalse(iter.hasNext());
// poll should return nothing
assertNull(set.pollFirst());
assertEquals(0, set.pollAll().size());
assertEquals(0, set.pollN(10).size());
// add the element back to the set
assertTrue(set.add(list.get(0)));
assertEquals(1, set.size());
iter = set.iterator();
assertTrue(iter.hasNext());
LOG.info("Test remove one - DONE");
}
@Test
public void testRemoveMulti() {
LOG.info("Test remove multi");
for (Integer i : list) {
assertTrue(set.add(i));
}
for (int i = 0; i < NUM / 2; i++) {
assertTrue(set.remove(list.get(i)));
}
// the deleted elements should not be there
for (int i = 0; i < NUM / 2; i++) {
assertFalse(set.contains(list.get(i)));
}
// the rest should be there
for (int i = NUM / 2; i < NUM; i++) {
assertTrue(set.contains(list.get(i)));
}
Iterator<Integer> iter = set.iterator();
// the remaining elements should be in order
int num = NUM / 2;
while (iter.hasNext()) {
assertEquals(list.get(num++), iter.next());
}
assertEquals(num, NUM);
LOG.info("Test remove multi - DONE");
}
@Test
public void testRemoveAll() {
LOG.info("Test remove all");
for (Integer i : list) {
assertTrue(set.add(i));
}
for (int i = 0; i < NUM; i++) {
assertTrue(set.remove(list.get(i)));
}
// the deleted elements should not be there
for (int i = 0; i < NUM; i++) {
assertFalse(set.contains(list.get(i)));
}
// iterator should not have next
Iterator<Integer> iter = set.iterator();
assertFalse(iter.hasNext());
assertTrue(set.isEmpty());
LOG.info("Test remove all - DONE");
}
@Test
public void testPollOneElement() {
LOG.info("Test poll one element");
set.add(list.get(0));
assertEquals(list.get(0), set.pollFirst());
assertNull(set.pollFirst());
LOG.info("Test poll one element - DONE");
}
@Test
public void testPollMulti() {
LOG.info("Test poll multi");
for (Integer i : list) {
assertTrue(set.add(i));
}
// remove half of the elements by polling
for (int i = 0; i < NUM / 2; i++) {
assertEquals(list.get(i), set.pollFirst());
}
assertEquals(NUM / 2, set.size());
// the deleted elements should not be there
for (int i = 0; i < NUM / 2; i++) {
assertFalse(set.contains(list.get(i)));
}
// the rest should be there
for (int i = NUM / 2; i < NUM; i++) {
assertTrue(set.contains(list.get(i)));
}
Iterator<Integer> iter = set.iterator();
// the remaining elements should be in order
int num = NUM / 2;
while (iter.hasNext()) {
assertEquals(list.get(num++), iter.next());
}
assertEquals(num, NUM);
// add elements back
for (int i = 0; i < NUM / 2; i++) {
assertTrue(set.add(list.get(i)));
}
// order should be switched
assertEquals(NUM, set.size());
for (int i = NUM / 2; i < NUM; i++) {
assertEquals(list.get(i), set.pollFirst());
}
for (int i = 0; i < NUM / 2; i++) {
assertEquals(list.get(i), set.pollFirst());
}
assertEquals(0, set.size());
assertTrue(set.isEmpty());
LOG.info("Test poll multi - DONE");
}
@Test
public void testPollAll() {
LOG.info("Test poll all");
for (Integer i : list) {
assertTrue(set.add(i));
}
// remove all elements by polling
while (set.pollFirst() != null);
assertEquals(0, set.size());
assertTrue(set.isEmpty());
// the deleted elements should not be there
for (int i = 0; i < NUM; i++) {
assertFalse(set.contains(list.get(i)));
}
Iterator<Integer> iter = set.iterator();
assertFalse(iter.hasNext());
LOG.info("Test poll all - DONE");
}
@Test
public void testPollNOne() {
LOG.info("Test pollN one");
set.add(list.get(0));
List<Integer> l = set.pollN(10);
assertEquals(1, l.size());
assertEquals(list.get(0), l.get(0));
LOG.info("Test pollN one - DONE");
}
@Test
public void testPollNMulti() {
LOG.info("Test pollN multi");
// use addAll
set.addAll(list);
// poll existing elements
List<Integer> l = set.pollN(10);
assertEquals(10, l.size());
for (int i = 0; i < 10; i++) {
assertEquals(list.get(i), l.get(i));
}
// poll more elements than present
l = set.pollN(1000);
assertEquals(NUM - 10, l.size());
// check the order
for (int i = 10; i < NUM; i++) {
assertEquals(list.get(i), l.get(i - 10));
}
// set is empty
assertTrue(set.isEmpty());
assertEquals(0, set.size());
LOG.info("Test pollN multi - DONE");
}
@Test
public void testClear() {
LOG.info("Test clear");
// use addAll
set.addAll(list);
assertEquals(NUM, set.size());
assertFalse(set.isEmpty());
// Advance the bookmark.
Iterator<Integer> bkmrkIt = set.getBookmark();
for (int i=0; i<set.size()/2+1; i++) {
bkmrkIt.next();
}
assertTrue(bkmrkIt.hasNext());
// clear the set
set.clear();
assertEquals(0, set.size());
assertTrue(set.isEmpty());
bkmrkIt = set.getBookmark();
assertFalse(bkmrkIt.hasNext());
// poll should return an empty list
assertEquals(0, set.pollAll().size());
assertEquals(0, set.pollN(10).size());
assertNull(set.pollFirst());
// iterator should be empty
Iterator<Integer> iter = set.iterator();
assertFalse(iter.hasNext());
LOG.info("Test clear - DONE");
}
@Test
public void testOther() {
LOG.info("Test other");
assertTrue(set.addAll(list));
// to array
Integer[] array = set.toArray(new Integer[0]);
assertEquals(NUM, array.length);
for (int i = 0; i < array.length; i++) {
assertTrue(list.contains(array[i]));
}
assertEquals(NUM, set.size());
// to array
Object[] array2 = set.toArray();
assertEquals(NUM, array2.length);
for (int i = 0; i < array2.length; i++) {
assertTrue(list.contains(array2[i]));
}
LOG.info("Test capacity - DONE");
}
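// The following tests exercise the bookmark: a resumable iterator position
// that persists across getBookmark() calls, skips past removed elements,
// and returns to the head after resetBookmark() or after an element is
// added to a previously empty set.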
@Test(timeout=60000)
public void testGetBookmarkReturnsBookmarkIterator() {
LOG.info("Test getBookmark returns proper iterator");
assertTrue(set.addAll(list));
Iterator<Integer> bookmark = set.getBookmark();
assertEquals(bookmark.next(), list.get(0));
final int numAdvance = list.size()/2;
for(int i=1; i<numAdvance; i++) {
bookmark.next();
}
Iterator<Integer> bookmark2 = set.getBookmark();
assertEquals(bookmark2.next(), list.get(numAdvance));
}
@Test(timeout=60000)
public void testBookmarkAdvancesOnRemoveOfSameElement() {
LOG.info("Test that the bookmark advances if we remove its element.");
assertTrue(set.add(list.get(0)));
assertTrue(set.add(list.get(1)));
assertTrue(set.add(list.get(2)));
Iterator<Integer> it = set.getBookmark();
assertEquals(it.next(), list.get(0));
set.remove(list.get(1));
it = set.getBookmark();
assertEquals(it.next(), list.get(2));
}
@Test(timeout=60000)
public void testBookmarkSetToHeadOnAddToEmpty() {
LOG.info("Test bookmark is set after adding to previously empty set.");
Iterator<Integer> it = set.getBookmark();
assertFalse(it.hasNext());
set.add(list.get(0));
set.add(list.get(1));
it = set.getBookmark();
assertTrue(it.hasNext());
assertEquals(it.next(), list.get(0));
assertEquals(it.next(), list.get(1));
assertFalse(it.hasNext());
}
@Test(timeout=60000)
public void testResetBookmarkPlacesBookmarkAtHead() {
set.addAll(list);
Iterator<Integer> it = set.getBookmark();
final int numAdvance = set.size()/2;
for (int i=0; i<numAdvance; i++) {
it.next();
}
assertEquals(it.next(), list.get(numAdvance));
set.resetBookmark();
it = set.getBookmark();
assertEquals(it.next(), list.get(0));
}
}
| 12,069 | 26.747126 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.test.PathUtils;
import org.junit.Before;
import org.junit.Test;
public class TestMD5FileUtils {
private static final File TEST_DIR = PathUtils.getTestDir(TestMD5FileUtils.class);
private static final File TEST_FILE = new File(TEST_DIR,
"testMd5File.dat");
private static final int TEST_DATA_LEN = 128 * 1024; // 128KB test data
private static final byte[] TEST_DATA =
DFSTestUtil.generateSequentialBytes(0, TEST_DATA_LEN);
private static final MD5Hash TEST_MD5 = MD5Hash.digest(TEST_DATA);
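// MD5FileUtils keeps the digest in a sidecar file next to the data file
// (see getDigestFileForFile); the tests below cover a matching digest, a
// missing sidecar, a wrong digest, and a malformed sidecar.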
@Before
public void setup() throws IOException {
FileUtil.fullyDelete(TEST_DIR);
assertTrue(TEST_DIR.mkdirs());
// Write a file out
FileOutputStream fos = new FileOutputStream(TEST_FILE);
fos.write(TEST_DATA);
fos.close();
}
@Test
public void testComputeMd5ForFile() throws Exception {
MD5Hash computedDigest = MD5FileUtils.computeMd5ForFile(TEST_FILE);
assertEquals(TEST_MD5, computedDigest);
}
@Test
public void testVerifyMD5FileGood() throws Exception {
MD5FileUtils.saveMD5File(TEST_FILE, TEST_MD5);
MD5FileUtils.verifySavedMD5(TEST_FILE, TEST_MD5);
}
/**
* Test when .md5 file does not exist at all
*/
@Test(expected=IOException.class)
public void testVerifyMD5FileMissing() throws Exception {
MD5FileUtils.verifySavedMD5(TEST_FILE, TEST_MD5);
}
/**
* Test when .md5 file exists but has an incorrect checksum
*/
@Test
public void testVerifyMD5FileBadDigest() throws Exception {
MD5FileUtils.saveMD5File(TEST_FILE, MD5Hash.digest(new byte[0]));
try {
MD5FileUtils.verifySavedMD5(TEST_FILE, TEST_MD5);
fail("Did not throw");
} catch (IOException ioe) {
// Expected
}
}
/**
* Test when .md5 file exists but has a bad format
*/
@Test
public void testVerifyMD5FileBadFormat() throws Exception {
FileWriter writer = new FileWriter(MD5FileUtils.getDigestFileForFile(TEST_FILE));
try {
writer.write("this is not an md5 file");
} finally {
writer.close();
}
try {
MD5FileUtils.verifySavedMD5(TEST_FILE, TEST_MD5);
fail("Did not throw");
} catch (IOException ioe) {
// expected
}
}
}
| 3,434 | 29.945946 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.Shell;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import com.google.common.base.Joiner;
public class TestAtomicFileOutputStream {
private static final String TEST_STRING = "hello world";
private static final String TEST_STRING_2 = "goodbye world";
private static final File TEST_DIR = PathUtils.getTestDir(TestAtomicFileOutputStream.class);
private static final File DST_FILE = new File(TEST_DIR, "test.txt");
@Rule
public ExpectedException exception = ExpectedException.none();
@Before
public void cleanupTestDir() throws IOException {
assertTrue(TEST_DIR.exists() || TEST_DIR.mkdirs());
FileUtil.fullyDeleteContents(TEST_DIR);
}
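// AtomicFileOutputStream is expected to write to a temporary file and only
// move the data into place on a successful close(); the tests below assert
// that the destination is untouched until then and that the temporary file
// is cleaned up on failure.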
/**
* Test case where there is no existing file
*/
@Test
public void testWriteNewFile() throws IOException {
OutputStream fos = new AtomicFileOutputStream(DST_FILE);
assertFalse(DST_FILE.exists());
fos.write(TEST_STRING.getBytes());
fos.flush();
assertFalse(DST_FILE.exists());
fos.close();
assertTrue(DST_FILE.exists());
String readBackData = DFSTestUtil.readFile(DST_FILE);
assertEquals(TEST_STRING, readBackData);
}
/**
* Test case where there is an existing file at the destination
*/
@Test
public void testOverwriteFile() throws IOException {
assertTrue("Creating empty dst file", DST_FILE.createNewFile());
OutputStream fos = new AtomicFileOutputStream(DST_FILE);
assertTrue("Empty file still exists", DST_FILE.exists());
fos.write(TEST_STRING.getBytes());
fos.flush();
// Original contents still in place
assertEquals("", DFSTestUtil.readFile(DST_FILE));
fos.close();
// New contents replace original file
String readBackData = DFSTestUtil.readFile(DST_FILE);
assertEquals(TEST_STRING, readBackData);
}
/**
* Test case where the flush() fails at close time - make sure
* that we clean up after ourselves and don't touch any
* existing file at the destination
*/
@Test
public void testFailToFlush() throws IOException {
// Create a file at destination
FileOutputStream fos = new FileOutputStream(DST_FILE);
fos.write(TEST_STRING_2.getBytes());
fos.close();
OutputStream failingStream = createFailingStream();
failingStream.write(TEST_STRING.getBytes());
try {
failingStream.close();
fail("Close didn't throw exception");
} catch (IOException ioe) {
// expected
}
// Should not have touched original file
assertEquals(TEST_STRING_2, DFSTestUtil.readFile(DST_FILE));
assertEquals("Temporary file should have been cleaned up",
DST_FILE.getName(), Joiner.on(",").join(TEST_DIR.list()));
}
@Test
public void testFailToRename() throws IOException {
assumeTrue(Shell.WINDOWS);
OutputStream fos = null;
try {
fos = new AtomicFileOutputStream(DST_FILE);
fos.write(TEST_STRING.getBytes());
FileUtil.setWritable(TEST_DIR, false);
exception.expect(IOException.class);
exception.expectMessage("failure in native rename");
try {
fos.close();
} finally {
fos = null;
}
} finally {
IOUtils.cleanup(null, fos);
FileUtil.setWritable(TEST_DIR, true);
}
}
/**
* Create a stream that fails to flush at close time
*/
private OutputStream createFailingStream() throws FileNotFoundException {
return new AtomicFileOutputStream(DST_FILE) {
@Override
public void flush() throws IOException {
throw new IOException("injected failure");
}
};
}
}
| 5,036 | 29.90184 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCyclicIteration.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
import static org.junit.Assert.assertEquals;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
import org.junit.Test;
public class TestCyclicIteration {
@Test
public void testCyclicIteration() throws Exception {
for(int n = 0; n < 5; n++) {
checkCyclicIteration(n);
}
}
private static void checkCyclicIteration(int numOfElements) {
//create a tree map
final NavigableMap<Integer, Integer> map = new TreeMap<Integer, Integer>();
final Integer[] integers = new Integer[numOfElements];
for(int i = 0; i < integers.length; i++) {
integers[i] = 2*i;
map.put(integers[i], integers[i]);
}
System.out.println("\n\nintegers=" + Arrays.asList(integers));
System.out.println("map=" + map);
//try starting everywhere
for(int start = -1; start <= 2*integers.length - 1; start++) {
//get a cyclic iteration
final List<Integer> iteration = new ArrayList<Integer>();
for(Map.Entry<Integer, Integer> e : new CyclicIteration<Integer, Integer>(map, start)) {
iteration.add(e.getKey());
}
System.out.println("start=" + start + ", iteration=" + iteration);
//verify results
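// keys are the even integers 0, 2, ..., 2(n-1), so the first key strictly
// greater than 'start' is at index (start+2)/2 (integer division); the
// iteration then wraps around cyclically from there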
for(int i = 0; i < integers.length; i++) {
final int j = ((start+2)/2 + i)%integers.length;
assertEquals("i=" + i + ", j=" + j, iteration.get(i), integers[j]);
}
}
}
}
| 2,331 | 34.333333 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.*;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.SortedSet;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.FeatureInfo;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature;
import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.junit.Test;
/**
* Test for {@link LayoutVersion}
*/
public class TestLayoutVersion {
public static final LayoutFeature LAST_NON_RESERVED_COMMON_FEATURE;
public static final LayoutFeature LAST_COMMON_FEATURE;
static {
final Feature[] features = Feature.values();
LAST_COMMON_FEATURE = features[features.length - 1];
LAST_NON_RESERVED_COMMON_FEATURE = LayoutVersion.getLastNonReservedFeature(features);
}
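// Note: HDFS layout versions are negative integers; each new feature
// decrements the version, so a "newer" layout version is more negative,
// and the RESERVED_* features pin the versions shipped in past releases.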
/**
* Tests to make sure a given layout version supports all the
* features from the ancestor
*/
@Test
public void testFeaturesFromAncestorSupported() {
for (LayoutFeature f : Feature.values()) {
validateFeatureList(f);
}
}
/**
* Test to make sure 0.20.203 supports delegation token
*/
@Test
public void testRelease203() {
assertTrue(NameNodeLayoutVersion.supports(LayoutVersion.Feature.DELEGATION_TOKEN,
Feature.RESERVED_REL20_203.getInfo().getLayoutVersion()));
}
/**
* Test to make sure 0.20.204 supports delegation token
*/
@Test
public void testRelease204() {
assertTrue(NameNodeLayoutVersion.supports(LayoutVersion.Feature.DELEGATION_TOKEN,
Feature.RESERVED_REL20_204.getInfo().getLayoutVersion()));
}
/**
* Test to make sure release 1.2.0 supports CONCAT
*/
@Test
public void testRelease1_2_0() {
assertTrue(NameNodeLayoutVersion.supports(LayoutVersion.Feature.CONCAT,
Feature.RESERVED_REL1_2_0.getInfo().getLayoutVersion()));
}
/**
* Test to make sure NameNode.Feature supports previous features
*/
@Test
public void testNameNodeFeature() {
final LayoutFeature first = NameNodeLayoutVersion.Feature.ROLLING_UPGRADE;
assertTrue(NameNodeLayoutVersion.supports(LAST_NON_RESERVED_COMMON_FEATURE,
first.getInfo().getLayoutVersion()));
assertEquals(LAST_COMMON_FEATURE.getInfo().getLayoutVersion() - 1,
first.getInfo().getLayoutVersion());
}
/**
* Test to make sure DataNode.Feature supports previous features
*/
@Test
public void testDataNodeFeature() {
final LayoutFeature first = DataNodeLayoutVersion.Feature.FIRST_LAYOUT;
assertTrue(DataNodeLayoutVersion.supports(LAST_NON_RESERVED_COMMON_FEATURE,
first.getInfo().getLayoutVersion()));
assertEquals(LAST_COMMON_FEATURE.getInfo().getLayoutVersion() - 1,
first.getInfo().getLayoutVersion());
}
/**
* Tests expected values for minimum compatible layout version in NameNode
* features. TRUNCATE, APPEND_NEW_BLOCK and QUOTA_BY_STORAGE_TYPE are all
* features that launched in the same release. TRUNCATE was added first, so
* we expect all 3 features to have a minimum compatible layout version equal
* to TRUNCATE's layout version. All features older than that existed prior
* to the concept of a minimum compatible layout version, so for each one, the
* minimum compatible layout version must be equal to itself.
*/
@Test
public void testNameNodeFeatureMinimumCompatibleLayoutVersions() {
int baseLV = NameNodeLayoutVersion.Feature.TRUNCATE.getInfo()
.getLayoutVersion();
EnumSet<NameNodeLayoutVersion.Feature> compatibleFeatures = EnumSet.of(
NameNodeLayoutVersion.Feature.TRUNCATE,
NameNodeLayoutVersion.Feature.APPEND_NEW_BLOCK,
NameNodeLayoutVersion.Feature.QUOTA_BY_STORAGE_TYPE);
for (LayoutFeature f : compatibleFeatures) {
assertEquals(String.format("Expected minimum compatible layout version " +
"%d for feature %s.", baseLV, f), baseLV,
f.getInfo().getMinimumCompatibleLayoutVersion());
}
List<LayoutFeature> features = new ArrayList<>();
features.addAll(EnumSet.allOf(LayoutVersion.Feature.class));
features.addAll(EnumSet.allOf(NameNodeLayoutVersion.Feature.class));
for (LayoutFeature f : features) {
if (!compatibleFeatures.contains(f)) {
assertEquals(String.format("Expected feature %s to have minimum " +
"compatible layout version set to itself.", f),
f.getInfo().getLayoutVersion(),
f.getInfo().getMinimumCompatibleLayoutVersion());
}
}
}
/**
* Tests that NameNode features are listed in order of minimum compatible
* layout version. It would be inconsistent to have features listed out of
* order with respect to minimum compatible layout version, because it would
* imply going back in time to change compatibility logic in a software release
* that had already shipped.
*/
@Test
public void testNameNodeFeatureMinimumCompatibleLayoutVersionAscending() {
LayoutFeature prevF = null;
for (LayoutFeature f : EnumSet.allOf(NameNodeLayoutVersion.Feature.class)) {
if (prevF != null) {
assertTrue(String.format("Features %s and %s not listed in order of " +
"minimum compatible layout version.", prevF, f),
f.getInfo().getMinimumCompatibleLayoutVersion() <=
prevF.getInfo().getMinimumCompatibleLayoutVersion());
} else {
prevF = f;
}
}
}
/**
* Tests that attempting to add a new NameNode feature out of order with
* respect to minimum compatible layout version will fail fast.
*/
@Test(expected=AssertionError.class)
public void testNameNodeFeatureMinimumCompatibleLayoutVersionOutOfOrder() {
FeatureInfo ancestorF = LayoutVersion.Feature.RESERVED_REL2_4_0.getInfo();
LayoutFeature f = mock(LayoutFeature.class);
when(f.getInfo()).thenReturn(new FeatureInfo(
ancestorF.getLayoutVersion() - 1, ancestorF.getLayoutVersion(),
ancestorF.getMinimumCompatibleLayoutVersion() + 1, "Invalid feature.",
false));
Map<Integer, SortedSet<LayoutFeature>> features = new HashMap<>();
LayoutVersion.updateMap(features, LayoutVersion.Feature.values());
LayoutVersion.updateMap(features, new LayoutFeature[] { f });
}
/**
* Asserts the current minimum compatible layout version of the software, if a
* release were created from the codebase right now. This test is meant to
* make developers stop and reconsider if they introduce a change that requires
* a new minimum compatible layout version. This would make downgrade
* impossible.
*/
@Test
public void testCurrentMinimumCompatibleLayoutVersion() {
int expectedMinCompatLV = NameNodeLayoutVersion.Feature.TRUNCATE.getInfo()
.getLayoutVersion();
int actualMinCompatLV = LayoutVersion.getMinimumCompatibleLayoutVersion(
NameNodeLayoutVersion.Feature.values());
assertEquals("The minimum compatible layout version has changed. " +
"Downgrade to prior versions is no longer possible. Please either " +
"restore compatibility, or if the incompatibility is intentional, " +
"then update this assertion.", expectedMinCompatLV, actualMinCompatLV);
}
/**
* Given feature {@code f}, ensures the layout version of that feature
* supports all the features supported by its ancestor.
*/
private void validateFeatureList(LayoutFeature f) {
final FeatureInfo info = f.getInfo();
int lv = info.getLayoutVersion();
int ancestorLV = info.getAncestorLayoutVersion();
SortedSet<LayoutFeature> ancestorSet = NameNodeLayoutVersion.getFeatures(ancestorLV);
assertNotNull(ancestorSet);
for (LayoutFeature feature : ancestorSet) {
assertTrue("LV " + lv + " does nto support " + feature
+ " supported by the ancestor LV " + info.getAncestorLayoutVersion(),
NameNodeLayoutVersion.supports(feature, lv));
}
}
/**
* When a LayoutVersion supports SNAPSHOT, it must support
* FSIMAGE_NAME_OPTIMIZATION.
*/
@Test
public void testSNAPSHOT() {
for(Feature f : Feature.values()) {
final int version = f.getInfo().getLayoutVersion();
if (NameNodeLayoutVersion.supports(Feature.SNAPSHOT, version)) {
assertTrue(NameNodeLayoutVersion.supports(
Feature.FSIMAGE_NAME_OPTIMIZATION, version));
}
}
}
}
| 9,507 | 38.94958 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
import org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo.Capability;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.protobuf.ByteString;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
public class TestBlockListAsLongs {
static Block b1 = new Block(1, 11, 111);
static Block b2 = new Block(2, 22, 222);
static Block b3 = new Block(3, 33, 333);
static Block b4 = new Block(4, 44, 444);
@Test
public void testEmptyReport() {
BlockListAsLongs blocks = checkReport();
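    // Long-encoded layout: two counts (finalized, under-construction),
    // then one (id, length, genstamp) triple per finalized replica, a
    // -1,-1,-1 delimiter, and one (id, length, genstamp, state) quad per
    // under-construction replica.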
assertArrayEquals(
new long[] {
0, 0,
-1, -1, -1 },
blocks.getBlockListAsLongs());
}
@Test
public void testFinalized() {
BlockListAsLongs blocks = checkReport(
new FinalizedReplica(b1, null, null));
assertArrayEquals(
new long[] {
1, 0,
1, 11, 111,
-1, -1, -1 },
blocks.getBlockListAsLongs());
}
@Test
public void testUc() {
BlockListAsLongs blocks = checkReport(
new ReplicaBeingWritten(b1, null, null, null));
assertArrayEquals(
new long[] {
0, 1,
-1, -1, -1,
1, 11, 111, ReplicaState.RBW.getValue() },
blocks.getBlockListAsLongs());
}
@Test
public void testMix() {
BlockListAsLongs blocks = checkReport(
new FinalizedReplica(b1, null, null),
new FinalizedReplica(b2, null, null),
new ReplicaBeingWritten(b3, null, null, null),
new ReplicaWaitingToBeRecovered(b4, null, null));
assertArrayEquals(
new long[] {
2, 2,
1, 11, 111,
2, 22, 222,
-1, -1, -1,
3, 33, 333, ReplicaState.RBW.getValue(),
4, 44, 444, ReplicaState.RWR.getValue() },
blocks.getBlockListAsLongs());
}
@Test
public void testFuzz() throws InterruptedException {
Replica[] replicas = new Replica[100000];
Random rand = new Random(0);
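    // the fixed seed keeps the fuzz input deterministic and reproducible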
for (int i=0; i<replicas.length; i++) {
Block b = new Block(rand.nextLong(), i, i<<4);
      // choose uniformly among all three replica states so every case below
      // is reachable
      switch (rand.nextInt(3)) {
case 0:
replicas[i] = new FinalizedReplica(b, null, null);
break;
case 1:
replicas[i] = new ReplicaBeingWritten(b, null, null, null);
break;
case 2:
replicas[i] = new ReplicaWaitingToBeRecovered(b, null, null);
break;
}
}
checkReport(replicas);
}
private BlockListAsLongs checkReport(Replica...replicas) {
Map<Long, Replica> expectedReplicas = new HashMap<>();
for (Replica replica : replicas) {
expectedReplicas.put(replica.getBlockId(), replica);
}
expectedReplicas = Collections.unmodifiableMap(expectedReplicas);
// encode the blocks and extract the buffers
BlockListAsLongs blocks =
BlockListAsLongs.encode(expectedReplicas.values());
List<ByteString> buffers = blocks.getBlocksBuffers();
// convert to old-style list of longs
List<Long> longs = new ArrayList<Long>();
for (long value : blocks.getBlockListAsLongs()) {
longs.add(value);
}
    // decode the buffers and verify their contents
BlockListAsLongs decodedBlocks =
BlockListAsLongs.decodeBuffers(expectedReplicas.size(), buffers);
checkReplicas(expectedReplicas, decodedBlocks);
    // decode the longs and verify their contents
BlockListAsLongs decodedList = BlockListAsLongs.decodeLongs(longs);
checkReplicas(expectedReplicas, decodedList);
return blocks;
}
private void checkReplicas(Map<Long,Replica> expectedReplicas,
BlockListAsLongs decodedBlocks) {
assertEquals(expectedReplicas.size(), decodedBlocks.getNumberOfBlocks());
Map<Long, Replica> reportReplicas = new HashMap<>(expectedReplicas);
for (BlockReportReplica replica : decodedBlocks) {
assertNotNull(replica);
Replica expected = reportReplicas.remove(replica.getBlockId());
assertNotNull(expected);
assertEquals("wrong bytes",
expected.getNumBytes(), replica.getNumBytes());
assertEquals("wrong genstamp",
expected.getGenerationStamp(), replica.getGenerationStamp());
assertEquals("wrong replica state",
expected.getState(), replica.getState());
}
assertTrue(reportReplicas.isEmpty());
}
@Test
public void testDatanodeDetect() throws ServiceException, IOException {
final AtomicReference<BlockReportRequestProto> request =
new AtomicReference<>();
// just capture the outgoing PB
DatanodeProtocolPB mockProxy = mock(DatanodeProtocolPB.class);
doAnswer(new Answer<BlockReportResponseProto>() {
public BlockReportResponseProto answer(InvocationOnMock invocation) {
Object[] args = invocation.getArguments();
request.set((BlockReportRequestProto) args[1]);
return BlockReportResponseProto.newBuilder().build();
}
}).when(mockProxy).blockReport(any(RpcController.class),
any(BlockReportRequestProto.class));
@SuppressWarnings("resource")
DatanodeProtocolClientSideTranslatorPB nn =
new DatanodeProtocolClientSideTranslatorPB(mockProxy);
DatanodeRegistration reg = DFSTestUtil.getLocalDatanodeRegistration();
NamespaceInfo nsInfo = new NamespaceInfo(1, "cluster", "bp", 1);
reg.setNamespaceInfo(nsInfo);
Replica r = new FinalizedReplica(new Block(1, 2, 3), null, null);
BlockListAsLongs bbl = BlockListAsLongs.encode(Collections.singleton(r));
DatanodeStorage storage = new DatanodeStorage("s1");
StorageBlockReport[] sbr = { new StorageBlockReport(storage, bbl) };
// check DN sends new-style BR
request.set(null);
nsInfo.setCapabilities(Capability.STORAGE_BLOCK_REPORT_BUFFERS.getMask());
nn.blockReport(reg, "pool", sbr,
new BlockReportContext(1, 0, System.nanoTime(), 0L));
BlockReportRequestProto proto = request.get();
assertNotNull(proto);
assertTrue(proto.getReports(0).getBlocksList().isEmpty());
assertFalse(proto.getReports(0).getBlocksBuffersList().isEmpty());
// back up to prior version and check DN sends old-style BR
request.set(null);
nsInfo.setCapabilities(Capability.UNKNOWN.getMask());
nn.blockReport(reg, "pool", sbr,
new BlockReportContext(1, 0, System.nanoTime(), 0L));
proto = request.get();
assertNotNull(proto);
assertFalse(proto.getReports(0).getBlocksList().isEmpty());
assertTrue(proto.getReports(0).getBlocksBuffersList().isEmpty());
}
}
| 9,199 | 37.174274 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestAnnotations.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import java.lang.reflect.Method;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.retry.AtMostOnce;
import org.apache.hadoop.io.retry.Idempotent;
import org.junit.Assert;
import org.junit.Test;
/**
* Tests to make sure all the protocol class public methods have
 * either {@link Idempotent} or {@link AtMostOnce} annotations.
*/
public class TestAnnotations {
@Test
public void checkAnnotations() {
Method[] methods = NamenodeProtocols.class.getMethods();
for (Method m : methods) {
Assert.assertTrue(
"Idempotent or AtMostOnce annotation is not present " + m,
m.isAnnotationPresent(Idempotent.class)
|| m.isAnnotationPresent(AtMostOnce.class));
}
}
}
| 1,612 | 35.659091 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import org.junit.Test;
public class TestExtendedBlock {
static final String POOL_A = "blockpool-a";
static final String POOL_B = "blockpool-b";
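  // Block(blockId, numBytes, generationStamp)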
static final Block BLOCK_1_GS1 = new Block(1L, 100L, 1L);
static final Block BLOCK_1_GS2 = new Block(1L, 100L, 2L);
static final Block BLOCK_2_GS1 = new Block(2L, 100L, 1L);
@Test
public void testEquals() {
// Same block -> equal
assertEquals(
new ExtendedBlock(POOL_A, BLOCK_1_GS1),
new ExtendedBlock(POOL_A, BLOCK_1_GS1));
// Different pools, same block id -> not equal
assertNotEquals(
new ExtendedBlock(POOL_A, BLOCK_1_GS1),
new ExtendedBlock(POOL_B, BLOCK_1_GS1));
// Same pool, different block id -> not equal
assertNotEquals(
new ExtendedBlock(POOL_A, BLOCK_1_GS1),
new ExtendedBlock(POOL_A, BLOCK_2_GS1));
// Same block, different genstamps -> equal
assertEquals(
new ExtendedBlock(POOL_A, BLOCK_1_GS1),
new ExtendedBlock(POOL_A, BLOCK_1_GS2));
}
@Test
public void testHashcode() {
// Different pools, same block id -> different hashcode
assertNotEquals(
new ExtendedBlock(POOL_A, BLOCK_1_GS1).hashCode(),
new ExtendedBlock(POOL_B, BLOCK_1_GS1).hashCode());
// Same pool, different block id -> different hashcode
assertNotEquals(
new ExtendedBlock(POOL_A, BLOCK_1_GS1).hashCode(),
new ExtendedBlock(POOL_A, BLOCK_2_GS1).hashCode());
// Same block -> same hashcode
assertEquals(
new ExtendedBlock(POOL_A, BLOCK_1_GS1).hashCode(),
new ExtendedBlock(POOL_A, BLOCK_1_GS1).hashCode());
}
private static void assertNotEquals(Object a, Object b) {
assertFalse("expected not equal: '" + a + "' and '" + b + "'",
a.equals(b));
}
}
| 2,744 | 34.192308 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/TestPacketReceiver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol.datatransfer;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.junit.Test;
import org.mockito.Mockito;
import com.google.common.primitives.Ints;
import static org.junit.Assert.*;
public class TestPacketReceiver {
private static final long OFFSET_IN_BLOCK = 12345L;
private static final int SEQNO = 54321;
private byte[] prepareFakePacket(byte[] data, byte[] sums) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(baos);
int packetLen = data.length + sums.length + Ints.BYTES;
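    // the advertised packet length covers the checksums, the data, and the
    // 4-byte length field itself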
PacketHeader header = new PacketHeader(
packetLen, OFFSET_IN_BLOCK, SEQNO, false, data.length, false);
header.write(dos);
dos.write(sums);
dos.write(data);
dos.flush();
return baos.toByteArray();
}
private static byte[] remainingAsArray(ByteBuffer buf) {
byte[] b = new byte[buf.remaining()];
buf.get(b);
return b;
}
@Test
public void testReceiveAndMirror() throws IOException {
PacketReceiver pr = new PacketReceiver(false);
// Test three different lengths, to force reallocing
// the buffer as it grows.
doTestReceiveAndMirror(pr, 100, 10);
doTestReceiveAndMirror(pr, 50, 10);
doTestReceiveAndMirror(pr, 150, 10);
pr.close();
}
private void doTestReceiveAndMirror(PacketReceiver pr,
int dataLen, int checksumsLen) throws IOException {
final byte[] DATA = AppendTestUtil.initBuffer(dataLen);
final byte[] CHECKSUMS = AppendTestUtil.initBuffer(checksumsLen);
byte[] packet = prepareFakePacket(DATA, CHECKSUMS);
ByteArrayInputStream in = new ByteArrayInputStream(packet);
pr.receiveNextPacket(in);
ByteBuffer parsedData = pr.getDataSlice();
assertArrayEquals(DATA, remainingAsArray(parsedData));
ByteBuffer parsedChecksums = pr.getChecksumSlice();
assertArrayEquals(CHECKSUMS, remainingAsArray(parsedChecksums));
PacketHeader header = pr.getHeader();
assertEquals(SEQNO, header.getSeqno());
assertEquals(OFFSET_IN_BLOCK, header.getOffsetInBlock());
assertEquals(dataLen + checksumsLen + Ints.BYTES, header.getPacketLen());
// Mirror the packet to an output stream and make sure it matches
// the packet we sent.
ByteArrayOutputStream mirrored = new ByteArrayOutputStream();
mirrored = Mockito.spy(mirrored);
pr.mirrorPacketTo(new DataOutputStream(mirrored));
    // The write should be done in a single call. Otherwise we may hit
    // nasty interactions with Nagle's algorithm (e.g. HDFS-4049).
Mockito.verify(mirrored, Mockito.times(1))
.write(Mockito.<byte[]>any(), Mockito.anyInt(),
Mockito.eq(packet.length));
Mockito.verifyNoMoreInteractions(mirrored);
assertArrayEquals(packet, mirrored.toByteArray());
}
}
| 3,835 | 33.872727 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
import static org.junit.Assert.*;
import java.io.File;
import java.util.Properties;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public abstract class SaslDataTransferTestCase {
private static File baseDir;
private static String hdfsPrincipal;
private static String userPrincipal;
private static MiniKdc kdc;
private static String hdfsKeytab;
private static String userKeyTab;
private static String spnegoPrincipal;
public static String getUserKeyTab() {
return userKeyTab;
}
public static String getUserPrincipal() {
return userPrincipal;
}
public static String getHdfsPrincipal() {
return hdfsPrincipal;
}
public static String getHdfsKeytab() {
return hdfsKeytab;
}
@BeforeClass
public static void initKdc() throws Exception {
baseDir = new File(System.getProperty("test.build.dir", "target/test-dir"),
SaslDataTransferTestCase.class.getSimpleName());
FileUtil.fullyDelete(baseDir);
assertTrue(baseDir.mkdirs());
Properties kdcConf = MiniKdc.createConf();
kdc = new MiniKdc(kdcConf, baseDir);
kdc.start();
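    // create a keytab and principal for a regular (non-superuser) test user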
String userName = RandomStringUtils.randomAlphabetic(8);
File userKeytabFile = new File(baseDir, userName + ".keytab");
userKeyTab = userKeytabFile.getAbsolutePath();
kdc.createPrincipal(userKeytabFile, userName + "/localhost");
userPrincipal = userName + "/localhost@" + kdc.getRealm();
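    // create the HDFS superuser principal and an HTTP SPNEGO principal,
    // both stored in the same keytab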
String superUserName = "hdfs";
File hdfsKeytabFile = new File(baseDir, superUserName + ".keytab");
hdfsKeytab = hdfsKeytabFile.getAbsolutePath();
kdc.createPrincipal(hdfsKeytabFile, superUserName + "/localhost", "HTTP/localhost");
hdfsPrincipal = superUserName + "/localhost@" + kdc.getRealm();
spnegoPrincipal = "HTTP/localhost@" + kdc.getRealm();
}
@AfterClass
public static void shutdownKdc() {
if (kdc != null) {
kdc.stop();
}
FileUtil.fullyDelete(baseDir);
}
/**
* Creates configuration for starting a secure cluster.
*
* @param dataTransferProtection supported QOPs
* @return configuration for starting a secure cluster
* @throws Exception if there is any failure
*/
protected HdfsConfiguration createSecureConfig(
String dataTransferProtection) throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, hdfsKeytab);
conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, hdfsKeytab);
conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
String keystoresDir = baseDir.getAbsolutePath();
String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
return conf;
}
}
| 5,566 | 40.237037 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.io.IOException;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.After;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.rules.Timeout;
public class TestSaslDataTransfer extends SaslDataTransferTestCase {
private static final int BLOCK_SIZE = 4096;
private static final int NUM_BLOCKS = 3;
private static final Path PATH = new Path("/file1");
private MiniDFSCluster cluster;
private FileSystem fs;
@Rule
public ExpectedException exception = ExpectedException.none();
@Rule
public Timeout timeout = new Timeout(60000);
@After
public void shutdown() {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testAuthentication() throws Exception {
HdfsConfiguration clusterConf = createSecureConfig(
"authentication,integrity,privacy");
startCluster(clusterConf);
HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
doTest(clientConf);
}
@Test
public void testIntegrity() throws Exception {
HdfsConfiguration clusterConf = createSecureConfig(
"authentication,integrity,privacy");
startCluster(clusterConf);
HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "integrity");
doTest(clientConf);
}
@Test
public void testPrivacy() throws Exception {
HdfsConfiguration clusterConf = createSecureConfig(
"authentication,integrity,privacy");
startCluster(clusterConf);
HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "privacy");
doTest(clientConf);
}
@Test
public void testClientAndServerDoNotHaveCommonQop() throws Exception {
HdfsConfiguration clusterConf = createSecureConfig("privacy");
startCluster(clusterConf);
HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
exception.expect(IOException.class);
exception.expectMessage("could only be replicated to 0 nodes");
doTest(clientConf);
}
@Test
public void testServerSaslNoClientSasl() throws Exception {
HdfsConfiguration clusterConf = createSecureConfig(
"authentication,integrity,privacy");
// Set short retry timeouts so this test runs faster
clusterConf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
startCluster(clusterConf);
HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(DataNode.class));
try {
doTest(clientConf);
Assert.fail("Should fail if SASL data transfer protection is not " +
"configured or not supported in client");
} catch (IOException e) {
GenericTestUtils.assertMatches(e.getMessage(),
"could only be replicated to 0 nodes");
} finally {
logs.stopCapturing();
}
GenericTestUtils.assertMatches(logs.getOutput(),
"Failed to read expected SASL data transfer protection " +
"handshake from client at");
}
@Test
public void testDataNodeAbortsIfNoSasl() throws Exception {
HdfsConfiguration clusterConf = createSecureConfig("");
exception.expect(RuntimeException.class);
exception.expectMessage("Cannot start secure DataNode");
startCluster(clusterConf);
}
@Test
public void testDataNodeAbortsIfNotHttpsOnly() throws Exception {
HdfsConfiguration clusterConf = createSecureConfig("authentication");
clusterConf.set(DFS_HTTP_POLICY_KEY,
HttpConfig.Policy.HTTP_AND_HTTPS.name());
exception.expect(RuntimeException.class);
exception.expectMessage("Cannot start secure DataNode");
startCluster(clusterConf);
}
@Test
public void testNoSaslAndSecurePortsIgnored() throws Exception {
HdfsConfiguration clusterConf = createSecureConfig("");
clusterConf.setBoolean(IGNORE_SECURE_PORTS_FOR_TESTING_KEY, true);
startCluster(clusterConf);
doTest(clusterConf);
}
/**
* Tests DataTransferProtocol with the given client configuration.
*
* @param conf client configuration
* @throws IOException if there is an I/O error
*/
private void doTest(HdfsConfiguration conf) throws IOException {
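    // write a multi-block file, read it back, and confirm every block is
    // fully replicated to all three DataNodes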
fs = FileSystem.get(cluster.getURI(), conf);
FileSystemTestHelper.createFile(fs, PATH, NUM_BLOCKS, BLOCK_SIZE);
assertArrayEquals(FileSystemTestHelper.getFileData(NUM_BLOCKS, BLOCK_SIZE),
DFSTestUtil.readFile(fs, PATH).getBytes("UTF-8"));
BlockLocation[] blockLocations = fs.getFileBlockLocations(PATH, 0,
Long.MAX_VALUE);
assertNotNull(blockLocations);
assertEquals(NUM_BLOCKS, blockLocations.length);
for (BlockLocation blockLocation: blockLocations) {
assertNotNull(blockLocation.getHosts());
assertEquals(3, blockLocation.getHosts().length);
}
}
/**
* Starts a cluster with the given configuration.
*
* @param conf cluster configuration
* @throws IOException if there is an I/O error
*/
private void startCluster(HdfsConfiguration conf) throws IOException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
}
}
| 7,356 | 35.785 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.qjournal;
import static org.junit.Assert.*;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
import java.io.File;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
public class TestSecureNNWithQJM {
private static final Path TEST_PATH = new Path("/test-dir");
private static final Path TEST_PATH_2 = new Path("/test-dir-2");
private static HdfsConfiguration baseConf;
private static File baseDir;
private static MiniKdc kdc;
private MiniDFSCluster cluster;
private HdfsConfiguration conf;
private FileSystem fs;
private MiniJournalCluster mjc;
@Rule
public Timeout timeout = new Timeout(30000);
@BeforeClass
public static void init() throws Exception {
baseDir = new File(System.getProperty("test.build.dir", "target/test-dir"),
TestSecureNNWithQJM.class.getSimpleName());
FileUtil.fullyDelete(baseDir);
assertTrue(baseDir.mkdirs());
Properties kdcConf = MiniKdc.createConf();
kdc = new MiniKdc(kdcConf, baseDir);
kdc.start();
baseConf = new HdfsConfiguration();
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,
baseConf);
UserGroupInformation.setConfiguration(baseConf);
assertTrue("Expected configuration to enable security",
UserGroupInformation.isSecurityEnabled());
String userName = UserGroupInformation.getLoginUser().getShortUserName();
File keytabFile = new File(baseDir, userName + ".keytab");
String keytab = keytabFile.getAbsolutePath();
// Windows will not reverse name lookup "127.0.0.1" to "localhost".
String krbInstance = Path.WINDOWS ? "127.0.0.1" : "localhost";
kdc.createPrincipal(keytabFile,
userName + "/" + krbInstance,
"HTTP/" + krbInstance);
String hdfsPrincipal = userName + "/" + krbInstance + "@" + kdc.getRealm();
String spnegoPrincipal = "HTTP/" + krbInstance + "@" + kdc.getRealm();
baseConf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
baseConf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
baseConf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
baseConf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
baseConf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
baseConf.set(DFS_JOURNALNODE_KEYTAB_FILE_KEY, keytab);
baseConf.set(DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
baseConf.set(DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
spnegoPrincipal);
baseConf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
baseConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
baseConf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
baseConf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
baseConf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
baseConf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, "localhost:0");
baseConf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
String keystoresDir = baseDir.getAbsolutePath();
String sslConfDir = KeyStoreTestUtil.getClasspathDir(
TestSecureNNWithQJM.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, baseConf, false);
}
@AfterClass
public static void destroy() {
if (kdc != null) {
kdc.stop();
}
FileUtil.fullyDelete(baseDir);
}
@Before
public void setup() throws Exception {
conf = new HdfsConfiguration(baseConf);
}
@After
public void shutdown() throws IOException {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
if (mjc != null) {
mjc.shutdown();
}
}
@Test
public void testSecureMode() throws Exception {
doNNWithQJMTest();
}
@Test
public void testSecondaryNameNodeHttpAddressNotNeeded() throws Exception {
conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "null");
doNNWithQJMTest();
}
/**
* Tests use of QJM with the defined cluster.
*
* @throws IOException if there is an I/O error
*/
private void doNNWithQJMTest() throws IOException {
startCluster();
assertTrue(fs.mkdirs(TEST_PATH));
// Restart the NN and make sure the edit was persisted
// and loaded again
restartNameNode();
assertTrue(fs.exists(TEST_PATH));
assertTrue(fs.mkdirs(TEST_PATH_2));
// Restart the NN again and make sure both edits are persisted.
restartNameNode();
assertTrue(fs.exists(TEST_PATH));
assertTrue(fs.exists(TEST_PATH_2));
}
/**
* Restarts the NameNode and obtains a new FileSystem.
*
* @throws IOException if there is an I/O error
*/
private void restartNameNode() throws IOException {
IOUtils.cleanup(null, fs);
cluster.restartNameNode();
fs = cluster.getFileSystem();
}
/**
* Starts a cluster using QJM with the defined configuration.
*
* @throws IOException if there is an I/O error
*/
private void startCluster() throws IOException {
mjc = new MiniJournalCluster.Builder(conf)
.build();
conf.set(DFS_NAMENODE_EDITS_DIR_KEY,
mjc.getQuorumJournalURI("myjournal").toString());
cluster = new MiniDFSCluster.Builder(conf)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
}
}
| 8,217 | 36.870968 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.qjournal;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
import java.io.IOException;
import java.net.BindException;
import java.net.URI;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
public class MiniQJMHACluster {
private MiniDFSCluster cluster;
private MiniJournalCluster journalCluster;
private final Configuration conf;
private static final Log LOG = LogFactory.getLog(MiniQJMHACluster.class);
public static final String NAMESERVICE = "ns1";
private static final String NN1 = "nn1";
private static final String NN2 = "nn2";
private static final Random RANDOM = new Random();
private int basePort = 10000;
public static class Builder {
private final Configuration conf;
private StartupOption startOpt = null;
private final MiniDFSCluster.Builder dfsBuilder;
public Builder(Configuration conf) {
this.conf = conf;
// most QJMHACluster tests don't need DataNodes, so we'll make
// this the default
this.dfsBuilder = new MiniDFSCluster.Builder(conf).numDataNodes(0);
}
public MiniDFSCluster.Builder getDfsBuilder() {
return dfsBuilder;
}
public MiniQJMHACluster build() throws IOException {
return new MiniQJMHACluster(this);
}
public void startupOption(StartupOption startOpt) {
this.startOpt = startOpt;
}
}
public static MiniDFSNNTopology createDefaultTopology(int basePort) {
return new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf(NAMESERVICE).addNN(
new MiniDFSNNTopology.NNConf("nn1").setIpcPort(basePort)
.setHttpPort(basePort + 1)).addNN(
new MiniDFSNNTopology.NNConf("nn2").setIpcPort(basePort + 2)
.setHttpPort(basePort + 3)));
}
private MiniQJMHACluster(Builder builder) throws IOException {
this.conf = builder.conf;
int retryCount = 0;
while (true) {
try {
basePort = 10000 + RANDOM.nextInt(1000) * 4;
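        // the random multiple of four reserves a block of four ports: an IPC
        // and an HTTP port for each of the two NameNodes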
// start 3 journal nodes
journalCluster = new MiniJournalCluster.Builder(conf).format(true)
.build();
URI journalURI = journalCluster.getQuorumJournalURI(NAMESERVICE);
// start cluster with 2 NameNodes
MiniDFSNNTopology topology = createDefaultTopology(basePort);
initHAConf(journalURI, builder.conf);
        // First start up the NNs just to format the namespace. The MiniDFSCluster
// has no way to just format the NameNodes without also starting them.
cluster = builder.dfsBuilder.nnTopology(topology)
.manageNameDfsSharedDirs(false).build();
cluster.waitActive();
cluster.shutdownNameNodes();
// initialize the journal nodes
Configuration confNN0 = cluster.getConfiguration(0);
NameNode.initializeSharedEdits(confNN0, true);
cluster.getNameNodeInfos()[0].setStartOpt(builder.startOpt);
cluster.getNameNodeInfos()[1].setStartOpt(builder.startOpt);
        // restart the cluster
        cluster.restartNameNodes();
        break;
      } catch (BindException e) {
        // count this retry and loop again with a fresh random port block
        ++retryCount;
        LOG.info("MiniQJMHACluster port conflicts, retried " +
            retryCount + " times");
      }
}
}
private Configuration initHAConf(URI journalURI, Configuration conf) {
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
journalURI.toString());
String address1 = "127.0.0.1:" + basePort;
String address2 = "127.0.0.1:" + (basePort + 2);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
NAMESERVICE, NN1), address1);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
NAMESERVICE, NN2), address2);
conf.set(DFSConfigKeys.DFS_NAMESERVICES, NAMESERVICE);
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, NAMESERVICE),
NN1 + "," + NN2);
conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + NAMESERVICE,
ConfiguredFailoverProxyProvider.class.getName());
conf.set("fs.defaultFS", "hdfs://" + NAMESERVICE);
return conf;
}
public MiniDFSCluster getDfsCluster() {
return cluster;
}
public MiniJournalCluster getJournalCluster() {
return journalCluster;
}
public void shutdown() throws IOException {
cluster.shutdown();
journalCluster.shutdown();
}
}
| 5,858 | 35.61875 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestMiniJournalCluster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.qjournal;
import static org.junit.Assert.*;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
import org.junit.Test;
public class TestMiniJournalCluster {
@Test
public void testStartStop() throws IOException {
Configuration conf = new Configuration();
MiniJournalCluster c = new MiniJournalCluster.Builder(conf)
.build();
try {
URI uri = c.getQuorumJournalURI("myjournal");
String[] addrs = uri.getAuthority().split(";");
assertEquals(3, addrs.length);
JournalNode node = c.getJournalNode(0);
String dir = node.getConf().get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY);
assertEquals(
new File(MiniDFSCluster.getBaseDirectory() + "journalnode-0")
.getAbsolutePath(),
dir);
} finally {
c.shutdown();
}
}
}
| 1,876 | 33.127273 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.qjournal;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager;
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.namenode.TestEditLog;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
import com.google.common.collect.Lists;
public abstract class QJMTestUtil {
public static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
12345, "mycluster", "my-bp", 0L);
public static final String JID = "test-journal";
public static byte[] createTxnData(int startTxn, int numTxns) throws Exception {
DataOutputBuffer buf = new DataOutputBuffer();
FSEditLogOp.Writer writer = new FSEditLogOp.Writer(buf);
for (long txid = startTxn; txid < startTxn + numTxns; txid++) {
FSEditLogOp op = NameNodeAdapter.createMkdirOp("tx " + txid);
op.setTransactionId(txid);
writer.writeOp(op);
}
return Arrays.copyOf(buf.getData(), buf.getLength());
}
/**
   * Generates a byte array representing a set of GarbageMkdirOps.
*/
public static byte[] createGabageTxns(long startTxId, int numTxns)
throws IOException {
DataOutputBuffer buf = new DataOutputBuffer();
FSEditLogOp.Writer writer = new FSEditLogOp.Writer(buf);
for (long txid = startTxId; txid < startTxId + numTxns; txid++) {
FSEditLogOp op = new TestEditLog.GarbageMkdirOp();
op.setTransactionId(txid);
writer.writeOp(op);
}
return Arrays.copyOf(buf.getData(), buf.getLength());
}
public static EditLogOutputStream writeSegment(MiniJournalCluster cluster,
QuorumJournalManager qjm, long startTxId, int numTxns,
boolean finalize) throws IOException {
EditLogOutputStream stm = qjm.startLogSegment(startTxId,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
// Should create in-progress
assertExistsInQuorum(cluster,
NNStorage.getInProgressEditsFileName(startTxId));
writeTxns(stm, startTxId, numTxns);
if (finalize) {
stm.close();
qjm.finalizeLogSegment(startTxId, startTxId + numTxns - 1);
return null;
} else {
return stm;
}
}
public static void writeOp(EditLogOutputStream stm, long txid) throws IOException {
FSEditLogOp op = NameNodeAdapter.createMkdirOp("tx " + txid);
op.setTransactionId(txid);
stm.write(op);
}
public static void writeTxns(EditLogOutputStream stm, long startTxId, int numTxns)
throws IOException {
for (long txid = startTxId; txid < startTxId + numTxns; txid++) {
writeOp(stm, txid);
}
stm.setReadyToFlush();
stm.flush();
}
/**
* Verify that the given list of streams contains exactly the range of
* transactions specified, inclusive.
*/
public static void verifyEdits(List<EditLogInputStream> streams,
int firstTxnId, int lastTxnId) throws IOException {
Iterator<EditLogInputStream> iter = streams.iterator();
assertTrue(iter.hasNext());
EditLogInputStream stream = iter.next();
for (int expected = firstTxnId;
expected <= lastTxnId;
expected++) {
FSEditLogOp op = stream.readOp();
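      // a null op means the current stream is exhausted; advance to the
      // next stream until the expected txid turns up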
while (op == null) {
assertTrue("Expected to find txid " + expected + ", " +
"but no more streams available to read from",
iter.hasNext());
stream = iter.next();
op = stream.readOp();
}
assertEquals(FSEditLogOpCodes.OP_MKDIR, op.opCode);
assertEquals(expected, op.getTransactionId());
}
assertNull(stream.readOp());
assertFalse("Expected no more txns after " + lastTxnId +
" but more streams are available", iter.hasNext());
}
public static void assertExistsInQuorum(MiniJournalCluster cluster,
String fname) {
int count = 0;
for (int i = 0; i < 3; i++) {
File dir = cluster.getCurrentDir(i, JID);
if (new File(dir, fname).exists()) {
count++;
}
}
assertTrue("File " + fname + " should exist in a quorum of dirs",
count >= cluster.getQuorumSize());
}
public static long recoverAndReturnLastTxn(QuorumJournalManager qjm)
throws IOException {
qjm.recoverUnfinalizedSegments();
long lastRecoveredTxn = 0;
List<EditLogInputStream> streams = Lists.newArrayList();
try {
qjm.selectInputStreams(streams, 0, false);
for (EditLogInputStream elis : streams) {
assertTrue(elis.getFirstTxId() > lastRecoveredTxn);
lastRecoveredTxn = elis.getLastTxId();
}
} finally {
IOUtils.cleanup(null, streams.toArray(new Closeable[0]));
}
return lastRecoveredTxn;
}
}
| 6,279 | 33.696133 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.qjournal;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.ExitUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestNNWithQJM {
final Configuration conf = new HdfsConfiguration();
private MiniJournalCluster mjc = null;
private final Path TEST_PATH = new Path("/test-dir");
private final Path TEST_PATH_2 = new Path("/test-dir-2");
@Before
public void resetSystemExit() {
ExitUtil.resetFirstExitException();
}
@Before
public void startJNs() throws Exception {
mjc = new MiniJournalCluster.Builder(conf).build();
}
@After
public void stopJNs() throws Exception {
if (mjc != null) {
mjc.shutdown();
mjc = null;
}
}
@Test (timeout = 30000)
public void testLogAndRestart() throws IOException {
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image");
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
mjc.getQuorumJournalURI("myjournal").toString());
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0)
.manageNameDfsDirs(false)
.build();
try {
cluster.getFileSystem().mkdirs(TEST_PATH);
// Restart the NN and make sure the edit was persisted
// and loaded again
cluster.restartNameNode();
assertTrue(cluster.getFileSystem().exists(TEST_PATH));
cluster.getFileSystem().mkdirs(TEST_PATH_2);
// Restart the NN again and make sure both edits are persisted.
cluster.restartNameNode();
assertTrue(cluster.getFileSystem().exists(TEST_PATH));
assertTrue(cluster.getFileSystem().exists(TEST_PATH_2));
} finally {
cluster.shutdown();
}
}
@Test (timeout = 30000)
public void testNewNamenodeTakesOverWriter() throws Exception {
File nn1Dir = new File(
MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image-nn1");
File nn2Dir = new File(
MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image-nn2");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
nn1Dir.getAbsolutePath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
mjc.getQuorumJournalURI("myjournal").toString());
// Start the cluster once to generate the dfs dirs
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0)
.manageNameDfsDirs(false)
.checkExitOnShutdown(false)
.build();
// Shutdown the cluster before making a copy of the namenode dir
// to release all file locks, otherwise, the copy will fail on
// some platforms.
cluster.shutdown();
try {
// Start a second NN pointed to the same quorum.
// We need to copy the image dir from the first NN -- or else
// the new NN will just be rejected because of Namespace mismatch.
FileUtil.fullyDelete(nn2Dir);
FileUtil.copy(nn1Dir, FileSystem.getLocal(conf).getRaw(),
new Path(nn2Dir.getAbsolutePath()), false, conf);
// Start the cluster again
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0)
.format(false)
.manageNameDfsDirs(false)
.checkExitOnShutdown(false)
.build();
cluster.getFileSystem().mkdirs(TEST_PATH);
Configuration conf2 = new Configuration();
conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
nn2Dir.getAbsolutePath());
conf2.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
mjc.getQuorumJournalURI("myjournal").toString());
MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf2)
.numDataNodes(0)
.format(false)
.manageNameDfsDirs(false)
.build();
// Check that the new cluster sees the edits made on the old cluster
try {
assertTrue(cluster2.getFileSystem().exists(TEST_PATH));
} finally {
cluster2.shutdown();
}
// Check that, if we try to write to the old NN
// that it aborts.
try {
cluster.getFileSystem().mkdirs(new Path("/x"));
fail("Did not abort trying to write to a fenced NN");
} catch (RemoteException re) {
GenericTestUtils.assertExceptionContains(
"Could not sync enough journals to persistent storage", re);
}
    } finally {
      // shut down the fenced cluster; checkExitOnShutdown(false) above keeps
      // the aborted NameNode from failing this cleanup
      cluster.shutdown();
    }
}
@Test (timeout = 30000)
public void testMismatchedNNIsRejected() throws Exception {
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image");
String defaultEditsDir = conf.get(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
mjc.getQuorumJournalURI("myjournal").toString());
// Start a NN, so the storage is formatted -- both on-disk
// and QJM.
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0)
.manageNameDfsDirs(false)
.build();
cluster.shutdown();
// Reformat just the on-disk portion
Configuration onDiskOnly = new Configuration(conf);
onDiskOnly.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, defaultEditsDir);
NameNode.format(onDiskOnly);
// Start the NN - should fail because the JNs are still formatted
// with the old namespace ID.
try {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0)
.manageNameDfsDirs(false)
.format(false)
.build();
fail("New NN with different namespace should have been rejected");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"Unable to start log segment 1: too few journals", ioe);
}
}
}
| 7,144 | 33.853659 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.qjournal;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
import org.apache.hadoop.net.NetUtils;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
public class MiniJournalCluster {
public static class Builder {
private String baseDir;
private int numJournalNodes = 3;
private boolean format = true;
private final Configuration conf;
public Builder(Configuration conf) {
this.conf = conf;
}
public Builder baseDir(String d) {
this.baseDir = d;
return this;
}
public Builder numJournalNodes(int n) {
this.numJournalNodes = n;
return this;
}
public Builder format(boolean f) {
this.format = f;
return this;
}
public MiniJournalCluster build() throws IOException {
return new MiniJournalCluster(this);
}
}
private static final class JNInfo {
private JournalNode node;
private final InetSocketAddress ipcAddr;
private final String httpServerURI;
private JNInfo(JournalNode node) {
this.node = node;
this.ipcAddr = node.getBoundIpcAddress();
this.httpServerURI = node.getHttpServerURI();
}
}
private static final Log LOG = LogFactory.getLog(MiniJournalCluster.class);
private final File baseDir;
private final JNInfo[] nodes;
private MiniJournalCluster(Builder b) throws IOException {
LOG.info("Starting MiniJournalCluster with " +
b.numJournalNodes + " journal nodes");
if (b.baseDir != null) {
this.baseDir = new File(b.baseDir);
} else {
this.baseDir = new File(MiniDFSCluster.getBaseDirectory());
}
nodes = new JNInfo[b.numJournalNodes];
for (int i = 0; i < b.numJournalNodes; i++) {
if (b.format) {
File dir = getStorageDir(i);
LOG.debug("Fully deleting JN directory " + dir);
FileUtil.fullyDelete(dir);
}
JournalNode jn = new JournalNode();
jn.setConf(createConfForNode(b, i));
jn.start();
nodes[i] = new JNInfo(jn);
}
}
  /**
   * Return the quorum journal URI for the given journal id, pointing at
   * the set of JournalNodes in this cluster.
   */
public URI getQuorumJournalURI(String jid) {
List<String> addrs = Lists.newArrayList();
for (JNInfo info : nodes) {
addrs.add("127.0.0.1:" + info.ipcAddr.getPort());
}
String addrsVal = Joiner.on(";").join(addrs);
LOG.debug("Setting logger addresses to: " + addrsVal);
try {
return new URI("qjournal://" + addrsVal + "/" + jid);
} catch (URISyntaxException e) {
throw new AssertionError(e);
}
}
/**
* Start the JournalNodes in the cluster.
*/
public void start() throws IOException {
for (JNInfo info : nodes) {
info.node.start();
}
}
/**
* Shutdown all of the JournalNodes in the cluster.
* @throws IOException if one or more nodes failed to stop
*/
public void shutdown() throws IOException {
boolean failed = false;
for (JNInfo info : nodes) {
try {
info.node.stopAndJoin(0);
} catch (Exception e) {
failed = true;
LOG.warn("Unable to stop journal node " + info.node, e);
}
}
if (failed) {
throw new IOException("Unable to shut down. Check log for details");
}
}
private Configuration createConfForNode(Builder b, int idx) {
Configuration conf = new Configuration(b.conf);
File logDir = getStorageDir(idx);
conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, logDir.toString());
conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY, "localhost:0");
conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY, "localhost:0");
return conf;
}
public File getStorageDir(int idx) {
return new File(baseDir, "journalnode-" + idx).getAbsoluteFile();
}
public File getJournalDir(int idx, String jid) {
return new File(getStorageDir(idx), jid);
}
public File getCurrentDir(int idx, String jid) {
return new File(getJournalDir(idx, jid), "current");
}
public File getPreviousDir(int idx, String jid) {
return new File(getJournalDir(idx, jid), "previous");
}
public JournalNode getJournalNode(int i) {
return nodes[i].node;
}
public void restartJournalNode(int i) throws InterruptedException, IOException {
JNInfo info = nodes[i];
JournalNode jn = info.node;
Configuration conf = new Configuration(jn.getConf());
if (jn.isStarted()) {
jn.stopAndJoin(0);
}
conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY,
NetUtils.getHostPortString(info.ipcAddr));
final String uri = info.httpServerURI;
    if (uri.startsWith("http://")) {
      conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
          uri.substring("http://".length()));
    } else if (uri.startsWith("https://")) {
      conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
          uri.substring("https://".length()));
    }
JournalNode newJN = new JournalNode();
newJN.setConf(conf);
newJN.start();
info.node = newJN;
}
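  /**
   * @return the minimum number of nodes that constitutes a quorum: a
   * strict majority of the cluster, e.g. 2 of 3 nodes, or 3 of 5.
   */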
public int getQuorumSize() {
return nodes.length / 2 + 1;
}
public int getNumNodes() {
return nodes.length;
}
}
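/**
 * Illustrative usage sketch (editor's addition, not part of the original
 * class): one plausible way to stand up a journal cluster and point an
 * edits dir at its quorum URI. The journal id "myjournal" and the 3-node
 * size are arbitrary example values.
 */
class MiniJournalClusterUsageSketch {
  static void example() throws IOException {
    Configuration conf = new Configuration();
    MiniJournalCluster jc = new MiniJournalCluster.Builder(conf)
        .numJournalNodes(3) // with 3 nodes, any 2 form a quorum
        .build();
    try {
      // Yields e.g. qjournal://127.0.0.1:p1;127.0.0.1:p2;127.0.0.1:p3/myjournal
      conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
          jc.getQuorumJournalURI("myjournal").toString());
      // ... start a NameNode / MiniDFSCluster against conf here ...
    } finally {
      jc.shutdown();
    }
  }
}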
| 6,496 | 28.39819 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestSegmentRecoveryComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.qjournal.client;
import static org.junit.Assert.*;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
import org.junit.Test;
import org.mockito.Mockito;
import com.google.common.collect.Maps;
import static org.apache.hadoop.hdfs.qjournal.client.SegmentRecoveryComparator.INSTANCE;
public class TestSegmentRecoveryComparator {
private static Map.Entry<AsyncLogger, PrepareRecoveryResponseProto> makeEntry(
PrepareRecoveryResponseProto proto) {
return Maps.immutableEntry(Mockito.mock(AsyncLogger.class), proto);
}
@Test
public void testComparisons() {
Entry<AsyncLogger, PrepareRecoveryResponseProto> INPROGRESS_1_3 =
makeEntry(PrepareRecoveryResponseProto.newBuilder()
.setSegmentState(SegmentStateProto.newBuilder()
.setStartTxId(1L)
.setEndTxId(3L)
.setIsInProgress(true))
.setLastWriterEpoch(0L)
.build());
Entry<AsyncLogger, PrepareRecoveryResponseProto> INPROGRESS_1_4 =
makeEntry(PrepareRecoveryResponseProto.newBuilder()
.setSegmentState(SegmentStateProto.newBuilder()
.setStartTxId(1L)
.setEndTxId(4L)
.setIsInProgress(true))
.setLastWriterEpoch(0L)
.build());
Entry<AsyncLogger, PrepareRecoveryResponseProto> INPROGRESS_1_4_ACCEPTED =
makeEntry(PrepareRecoveryResponseProto.newBuilder()
.setSegmentState(SegmentStateProto.newBuilder()
.setStartTxId(1L)
.setEndTxId(4L)
.setIsInProgress(true))
.setLastWriterEpoch(0L)
.setAcceptedInEpoch(1L)
.build());
Entry<AsyncLogger, PrepareRecoveryResponseProto> FINALIZED_1_3 =
makeEntry(PrepareRecoveryResponseProto.newBuilder()
.setSegmentState(SegmentStateProto.newBuilder()
.setStartTxId(1L)
.setEndTxId(3L)
.setIsInProgress(false))
.setLastWriterEpoch(0L)
.build());
// Should compare equal to itself
assertEquals(0, INSTANCE.compare(INPROGRESS_1_3, INPROGRESS_1_3));
// Longer log wins.
assertEquals(-1, INSTANCE.compare(INPROGRESS_1_3, INPROGRESS_1_4));
assertEquals(1, INSTANCE.compare(INPROGRESS_1_4, INPROGRESS_1_3));
// Finalized log wins even over a longer in-progress
assertEquals(-1, INSTANCE.compare(INPROGRESS_1_4, FINALIZED_1_3));
assertEquals(1, INSTANCE.compare(FINALIZED_1_3, INPROGRESS_1_4));
// Finalized log wins even if the in-progress one has an accepted
// recovery proposal.
assertEquals(-1, INSTANCE.compare(INPROGRESS_1_4_ACCEPTED, FINALIZED_1_3));
assertEquals(1, INSTANCE.compare(FINALIZED_1_3, INPROGRESS_1_4_ACCEPTED));
}
}
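/**
 * Illustrative sketch (editor's addition): recovery can pick the best
 * response by taking the maximum under this comparator -- a finalized
 * segment beats any in-progress one, and among in-progress segments a
 * higher accepted/writer epoch and then a higher end txid win, as the
 * assertions above demonstrate. The helper below is an example only.
 */
class SegmentRecoveryComparatorUsageSketch {
  static Entry<AsyncLogger, PrepareRecoveryResponseProto> pickBest(
      java.util.List<Entry<AsyncLogger, PrepareRecoveryResponseProto>> responses) {
    // The "largest" entry under the comparator is the segment to recover.
    return java.util.Collections.max(responses, INSTANCE);
  }
}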
| 3,772 | 38.715789 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.qjournal.client;
import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.FAKE_NSINFO;
import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.JID;
import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.verifyEdits;
import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.writeSegment;
import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.writeTxns;
import static org.apache.hadoop.hdfs.qjournal.client.TestQuorumJournalManagerUnit.futureThrows;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
import org.apache.hadoop.hdfs.qjournal.QJMTestUtil;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
import org.apache.hadoop.hdfs.qjournal.server.JournalFaultInjector;
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager;
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.stubbing.Stubber;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.MoreExecutors;
/**
* Functional tests for QuorumJournalManager.
* For true unit tests, see {@link TestQuorumJournalManagerUnit}.
*/
public class TestQuorumJournalManager {
private static final Log LOG = LogFactory.getLog(
TestQuorumJournalManager.class);
private MiniJournalCluster cluster;
private Configuration conf;
private QuorumJournalManager qjm;
private List<AsyncLogger> spies;
private final List<QuorumJournalManager> toClose = Lists.newLinkedList();
static {
((Log4JLogger)ProtobufRpcEngine.LOG).getLogger().setLevel(Level.ALL);
}
@Before
public void setup() throws Exception {
conf = new Configuration();
// Don't retry connections - it just slows down the tests.
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
cluster = new MiniJournalCluster.Builder(conf)
.build();
qjm = createSpyingQJM();
spies = qjm.getLoggerSetForTests().getLoggersForTests();
qjm.format(QJMTestUtil.FAKE_NSINFO);
qjm.recoverUnfinalizedSegments();
assertEquals(1, qjm.getLoggerSetForTests().getEpoch());
}
@After
public void shutdown() throws IOException {
IOUtils.cleanup(LOG, toClose.toArray(new Closeable[0]));
// Should not leak clients between tests -- this can cause flaky tests.
// (See HDFS-4643)
GenericTestUtils.assertNoThreadsMatching(".*IPC Client.*");
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Enqueue a QJM for closing during shutdown. This makes the code a little
* easier to follow, with fewer try..finally clauses necessary.
*/
private QuorumJournalManager closeLater(QuorumJournalManager qjm) {
toClose.add(qjm);
return qjm;
}
@Test
public void testSingleWriter() throws Exception {
writeSegment(cluster, qjm, 1, 3, true);
// Should be finalized
checkRecovery(cluster, 1, 3);
// Start a new segment
writeSegment(cluster, qjm, 4, 1, true);
// Should be finalized
checkRecovery(cluster, 4, 4);
}
@Test
public void testFormat() throws Exception {
QuorumJournalManager qjm = closeLater(new QuorumJournalManager(
conf, cluster.getQuorumJournalURI("testFormat-jid"), FAKE_NSINFO));
assertFalse(qjm.hasSomeData());
qjm.format(FAKE_NSINFO);
assertTrue(qjm.hasSomeData());
}
@Test
public void testReaderWhileAnotherWrites() throws Exception {
QuorumJournalManager readerQjm = closeLater(createSpyingQJM());
List<EditLogInputStream> streams = Lists.newArrayList();
readerQjm.selectInputStreams(streams, 0, false);
assertEquals(0, streams.size());
writeSegment(cluster, qjm, 1, 3, true);
readerQjm.selectInputStreams(streams, 0, false);
try {
assertEquals(1, streams.size());
// Validate the actual stream contents.
EditLogInputStream stream = streams.get(0);
assertEquals(1, stream.getFirstTxId());
assertEquals(3, stream.getLastTxId());
verifyEdits(streams, 1, 3);
assertNull(stream.readOp());
} finally {
IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
streams.clear();
}
// Ensure correct results when there is a stream in-progress, but we don't
// ask for in-progress.
writeSegment(cluster, qjm, 4, 3, false);
readerQjm.selectInputStreams(streams, 0, false);
try {
assertEquals(1, streams.size());
EditLogInputStream stream = streams.get(0);
assertEquals(1, stream.getFirstTxId());
assertEquals(3, stream.getLastTxId());
verifyEdits(streams, 1, 3);
} finally {
IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
streams.clear();
}
// TODO: check results for selectInputStreams with inProgressOK = true.
    // This doesn't currently work, due to a bug where RedundantEditLogInputStream
// throws an exception if there are any unvalidated in-progress edits in the list!
// But, it shouldn't be necessary for current use cases.
qjm.finalizeLogSegment(4, 6);
readerQjm.selectInputStreams(streams, 0, false);
try {
assertEquals(2, streams.size());
assertEquals(4, streams.get(1).getFirstTxId());
assertEquals(6, streams.get(1).getLastTxId());
verifyEdits(streams, 1, 6);
} finally {
IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
streams.clear();
}
}
/**
* Regression test for HDFS-3725. One of the journal nodes is down
* during the writing of one segment, then comes back up later to
* take part in a later segment. Thus, its local edits are
* not a contiguous sequence. This should be handled correctly.
*/
@Test
public void testOneJNMissingSegments() throws Exception {
writeSegment(cluster, qjm, 1, 3, true);
waitForAllPendingCalls(qjm.getLoggerSetForTests());
cluster.getJournalNode(0).stopAndJoin(0);
writeSegment(cluster, qjm, 4, 3, true);
waitForAllPendingCalls(qjm.getLoggerSetForTests());
cluster.restartJournalNode(0);
writeSegment(cluster, qjm, 7, 3, true);
waitForAllPendingCalls(qjm.getLoggerSetForTests());
cluster.getJournalNode(1).stopAndJoin(0);
QuorumJournalManager readerQjm = createSpyingQJM();
List<EditLogInputStream> streams = Lists.newArrayList();
try {
readerQjm.selectInputStreams(streams, 1, false);
verifyEdits(streams, 1, 9);
} finally {
IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
readerQjm.close();
}
}
/**
* Regression test for HDFS-3891: selectInputStreams should throw
* an exception when a majority of journalnodes have crashed.
*/
@Test
public void testSelectInputStreamsMajorityDown() throws Exception {
// Shut down all of the JNs.
cluster.shutdown();
List<EditLogInputStream> streams = Lists.newArrayList();
try {
qjm.selectInputStreams(streams, 0, false);
fail("Did not throw IOE");
} catch (QuorumException ioe) {
GenericTestUtils.assertExceptionContains(
"Got too many exceptions", ioe);
assertTrue(streams.isEmpty());
}
}
/**
* Test the case where the NN crashes after starting a new segment
* on all nodes, but before writing the first transaction to it.
*/
@Test
public void testCrashAtBeginningOfSegment() throws Exception {
writeSegment(cluster, qjm, 1, 3, true);
waitForAllPendingCalls(qjm.getLoggerSetForTests());
EditLogOutputStream stm = qjm.startLogSegment(4,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
try {
waitForAllPendingCalls(qjm.getLoggerSetForTests());
} finally {
stm.abort();
}
// Make a new QJM
qjm = closeLater(new QuorumJournalManager(
conf, cluster.getQuorumJournalURI(JID), FAKE_NSINFO));
qjm.recoverUnfinalizedSegments();
checkRecovery(cluster, 1, 3);
writeSegment(cluster, qjm, 4, 3, true);
}
@Test
public void testOutOfSyncAtBeginningOfSegment0() throws Exception {
doTestOutOfSyncAtBeginningOfSegment(0);
}
@Test
public void testOutOfSyncAtBeginningOfSegment1() throws Exception {
doTestOutOfSyncAtBeginningOfSegment(1);
}
@Test
public void testOutOfSyncAtBeginningOfSegment2() throws Exception {
doTestOutOfSyncAtBeginningOfSegment(2);
}
/**
* Test the case where, at the beginning of a segment, transactions
* have been written to one JN but not others.
*/
public void doTestOutOfSyncAtBeginningOfSegment(int nodeWithOneTxn)
throws Exception {
int nodeWithEmptySegment = (nodeWithOneTxn + 1) % 3;
int nodeMissingSegment = (nodeWithOneTxn + 2) % 3;
writeSegment(cluster, qjm, 1, 3, true);
waitForAllPendingCalls(qjm.getLoggerSetForTests());
cluster.getJournalNode(nodeMissingSegment).stopAndJoin(0);
// Open segment on 2/3 nodes
EditLogOutputStream stm = qjm.startLogSegment(4,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
try {
waitForAllPendingCalls(qjm.getLoggerSetForTests());
// Write transactions to only 1/3 nodes
failLoggerAtTxn(spies.get(nodeWithEmptySegment), 4);
try {
writeTxns(stm, 4, 1);
fail("Did not fail even though 2/3 failed");
} catch (QuorumException qe) {
GenericTestUtils.assertExceptionContains("mock failure", qe);
}
} finally {
stm.abort();
}
// Bring back the down JN.
cluster.restartJournalNode(nodeMissingSegment);
// Make a new QJM. At this point, the state is as follows:
// A: nodeWithEmptySegment: 1-3 finalized, 4_inprogress (empty)
// B: nodeWithOneTxn: 1-3 finalized, 4_inprogress (1 txn)
// C: nodeMissingSegment: 1-3 finalized
GenericTestUtils.assertGlobEquals(
cluster.getCurrentDir(nodeWithEmptySegment, JID),
"edits_.*",
NNStorage.getFinalizedEditsFileName(1, 3),
NNStorage.getInProgressEditsFileName(4));
GenericTestUtils.assertGlobEquals(
cluster.getCurrentDir(nodeWithOneTxn, JID),
"edits_.*",
NNStorage.getFinalizedEditsFileName(1, 3),
NNStorage.getInProgressEditsFileName(4));
GenericTestUtils.assertGlobEquals(
cluster.getCurrentDir(nodeMissingSegment, JID),
"edits_.*",
NNStorage.getFinalizedEditsFileName(1, 3));
// Stop one of the nodes. Since we run this test three
// times, rotating the roles of the nodes, we'll test
// all the permutations.
cluster.getJournalNode(2).stopAndJoin(0);
qjm = createSpyingQJM();
qjm.recoverUnfinalizedSegments();
if (nodeWithOneTxn == 0 ||
nodeWithOneTxn == 1) {
// If the node that had the transaction committed was one of the nodes
// that responded during recovery, then we should have recovered txid
// 4.
checkRecovery(cluster, 4, 4);
writeSegment(cluster, qjm, 5, 3, true);
} else {
// Otherwise, we should have recovered only 1-3 and should be able to
// start a segment at 4.
checkRecovery(cluster, 1, 3);
writeSegment(cluster, qjm, 4, 3, true);
}
}
/**
* Test case where a new writer picks up from an old one with no failures
* and the previous unfinalized segment entirely consistent -- i.e. all
* the JournalNodes end at the same transaction ID.
*/
@Test
public void testChangeWritersLogsInSync() throws Exception {
writeSegment(cluster, qjm, 1, 3, false);
QJMTestUtil.assertExistsInQuorum(cluster,
NNStorage.getInProgressEditsFileName(1));
// Make a new QJM
qjm = closeLater(new QuorumJournalManager(
conf, cluster.getQuorumJournalURI(JID), FAKE_NSINFO));
qjm.recoverUnfinalizedSegments();
checkRecovery(cluster, 1, 3);
}
/**
* Test case where a new writer picks up from an old one which crashed
* with the three loggers at different txnids
*/
@Test
public void testChangeWritersLogsOutOfSync1() throws Exception {
// Journal states: [3, 4, 5]
// During recovery: [x, 4, 5]
    // Should recover to txn 5
doOutOfSyncTest(0, 5L);
}
@Test
public void testChangeWritersLogsOutOfSync2() throws Exception {
// Journal states: [3, 4, 5]
// During recovery: [3, x, 5]
    // Should recover to txn 5
doOutOfSyncTest(1, 5L);
}
@Test
public void testChangeWritersLogsOutOfSync3() throws Exception {
// Journal states: [3, 4, 5]
// During recovery: [3, 4, x]
    // Should recover to txn 4
doOutOfSyncTest(2, 4L);
}
private void doOutOfSyncTest(int missingOnRecoveryIdx,
long expectedRecoveryTxnId) throws Exception {
setupLoggers345();
QJMTestUtil.assertExistsInQuorum(cluster,
NNStorage.getInProgressEditsFileName(1));
// Shut down the specified JN, so it's not present during recovery.
cluster.getJournalNode(missingOnRecoveryIdx).stopAndJoin(0);
// Make a new QJM
qjm = createSpyingQJM();
qjm.recoverUnfinalizedSegments();
checkRecovery(cluster, 1, expectedRecoveryTxnId);
}
  /**
   * Stub the given spied logger so that its sendEdits() future fails for
   * the single-transaction batch starting at the given txid, letting tests
   * knock out exactly one logger at an exact transaction.
   */
  private void failLoggerAtTxn(AsyncLogger spy, long txid) {
    TestQuorumJournalManagerUnit.futureThrows(new IOException("mock failure"))
      .when(spy).sendEdits(Mockito.anyLong(),
        Mockito.eq(txid), Mockito.eq(1), Mockito.<byte[]>any());
  }
/**
* Test the case where one of the loggers misses a finalizeLogSegment()
* call, and then misses the next startLogSegment() call before coming
* back to life.
*
* Previously, this caused it to keep on writing to the old log segment,
   * such that one logger had e.g. edits_1-10 while the others had edits_1-5 and
* edits_6-10. This caused recovery to fail in certain cases.
*/
@Test
public void testMissFinalizeAndNextStart() throws Exception {
// Logger 0: miss finalize(1-3) and start(4)
futureThrows(new IOException("injected")).when(spies.get(0))
.finalizeLogSegment(Mockito.eq(1L), Mockito.eq(3L));
futureThrows(new IOException("injected")).when(spies.get(0))
.startLogSegment(Mockito.eq(4L),
Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
// Logger 1: fail at txn id 4
failLoggerAtTxn(spies.get(1), 4L);
writeSegment(cluster, qjm, 1, 3, true);
EditLogOutputStream stm = qjm.startLogSegment(4,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
try {
writeTxns(stm, 4, 1);
fail("Did not fail to write");
} catch (QuorumException qe) {
// Should fail, because logger 1 had an injected fault and
// logger 0 should detect writer out of sync
GenericTestUtils.assertExceptionContains("Writer out of sync",
qe);
} finally {
stm.abort();
qjm.close();
}
// State:
// Logger 0: 1-3 in-progress (since it missed finalize)
// Logger 1: 1-3 finalized
// Logger 2: 1-3 finalized, 4 in-progress with one txn
// Shut down logger 2 so it doesn't participate in recovery
cluster.getJournalNode(2).stopAndJoin(0);
qjm = createSpyingQJM();
long recovered = QJMTestUtil.recoverAndReturnLastTxn(qjm);
assertEquals(3L, recovered);
}
/**
* edit lengths [3,4,5]
* first recovery:
* - sees [3,4,x]
* - picks length 4 for recoveryEndTxId
* - calls acceptRecovery()
* - crashes before finalizing
* second recovery:
* - sees [x, 4, 5]
* - should pick recovery length 4, even though it saw
* a larger txid, because a previous recovery accepted it
*/
@Test
public void testRecoverAfterIncompleteRecovery() throws Exception {
setupLoggers345();
// Shut down the logger that has length = 5
cluster.getJournalNode(2).stopAndJoin(0);
qjm = createSpyingQJM();
spies = qjm.getLoggerSetForTests().getLoggersForTests();
    // Prevent any logger from successfully finalizing the segment
for (AsyncLogger spy : spies) {
TestQuorumJournalManagerUnit.futureThrows(new IOException("injected"))
.when(spy).finalizeLogSegment(Mockito.eq(1L),
Mockito.eq(4L));
}
try {
qjm.recoverUnfinalizedSegments();
fail("Should have failed recovery since no finalization occurred");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("injected", ioe);
}
// Now bring back the logger that had 5, and run recovery again.
// We should recover to 4, even though there's a longer log.
cluster.getJournalNode(0).stopAndJoin(0);
cluster.restartJournalNode(2);
qjm = createSpyingQJM();
spies = qjm.getLoggerSetForTests().getLoggersForTests();
qjm.recoverUnfinalizedSegments();
checkRecovery(cluster, 1, 4);
}
/**
* Set up the loggers into the following state:
* - JN0: edits 1-3 in progress
* - JN1: edits 1-4 in progress
* - JN2: edits 1-5 in progress
*
* None of the loggers have any associated paxos info.
*/
private void setupLoggers345() throws Exception {
EditLogOutputStream stm = qjm.startLogSegment(1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
failLoggerAtTxn(spies.get(0), 4);
failLoggerAtTxn(spies.get(1), 5);
writeTxns(stm, 1, 3);
// This should succeed to 2/3 loggers
writeTxns(stm, 4, 1);
    // This should only succeed on 1 logger (index 2), and hence
    // should fail.
try {
writeTxns(stm, 5, 1);
fail("Did not fail to write when only a minority succeeded");
} catch (QuorumException qe) {
GenericTestUtils.assertExceptionContains(
"too many exceptions to achieve quorum size 2/3",
qe);
}
}
/**
* Set up the following tricky edge case state which is used by
* multiple tests:
*
* Initial writer:
* - Writing to 3 JNs: JN0, JN1, JN2:
* - A log segment with txnid 1 through 100 succeeds.
* - The first transaction in the next segment only goes to JN0
   * before the writer crashes (e.g. it is partitioned)
*
* Recovery by another writer:
* - The new NN starts recovery and talks to all three. Thus, it sees
* that the newest log segment which needs recovery is 101.
* - It sends the prepareRecovery(101) call, and decides that the
* recovery length for 101 is only the 1 transaction.
* - It sends acceptRecovery(101-101) to only JN0, before crashing
*
* This yields the following state:
* - JN0: 1-100 finalized, 101_inprogress, accepted recovery: 101-101
* - JN1: 1-100 finalized, 101_inprogress.empty
* - JN2: 1-100 finalized, 101_inprogress.empty
* (the .empty files got moved aside during recovery)
* @throws Exception
*/
private void setupEdgeCaseOneJnHasSegmentWithAcceptedRecovery() throws Exception {
// Log segment with txns 1-100 succeeds
writeSegment(cluster, qjm, 1, 100, true);
// startLogSegment only makes it to one of the three nodes
failLoggerAtTxn(spies.get(1), 101);
failLoggerAtTxn(spies.get(2), 101);
try {
writeSegment(cluster, qjm, 101, 1, true);
fail("Should have failed");
} catch (QuorumException qe) {
GenericTestUtils.assertExceptionContains("mock failure", qe);
} finally {
qjm.close();
}
// Recovery 1:
// make acceptRecovery() only make it to the node which has txid 101
// this should fail because only 1/3 accepted the recovery
qjm = createSpyingQJM();
spies = qjm.getLoggerSetForTests().getLoggersForTests();
futureThrows(new IOException("mock failure")).when(spies.get(1))
.acceptRecovery(Mockito.<SegmentStateProto>any(), Mockito.<URL>any());
futureThrows(new IOException("mock failure")).when(spies.get(2))
.acceptRecovery(Mockito.<SegmentStateProto>any(), Mockito.<URL>any());
try {
qjm.recoverUnfinalizedSegments();
fail("Should have failed to recover");
} catch (QuorumException qe) {
GenericTestUtils.assertExceptionContains("mock failure", qe);
} finally {
qjm.close();
}
// Check that we have entered the expected state as described in the
// method javadoc.
GenericTestUtils.assertGlobEquals(cluster.getCurrentDir(0, JID),
"edits_.*",
NNStorage.getFinalizedEditsFileName(1, 100),
NNStorage.getInProgressEditsFileName(101));
GenericTestUtils.assertGlobEquals(cluster.getCurrentDir(1, JID),
"edits_.*",
NNStorage.getFinalizedEditsFileName(1, 100),
NNStorage.getInProgressEditsFileName(101) + ".empty");
GenericTestUtils.assertGlobEquals(cluster.getCurrentDir(2, JID),
"edits_.*",
NNStorage.getFinalizedEditsFileName(1, 100),
NNStorage.getInProgressEditsFileName(101) + ".empty");
File paxos0 = new File(cluster.getCurrentDir(0, JID), "paxos");
File paxos1 = new File(cluster.getCurrentDir(1, JID), "paxos");
File paxos2 = new File(cluster.getCurrentDir(2, JID), "paxos");
GenericTestUtils.assertGlobEquals(paxos0, ".*", "101");
GenericTestUtils.assertGlobEquals(paxos1, ".*");
GenericTestUtils.assertGlobEquals(paxos2, ".*");
}
/**
* Test an edge case discovered by randomized testing.
*
* Starts with the edge case state set up by
* {@link #setupEdgeCaseOneJnHasSegmentWithAcceptedRecovery()}
*
* Recovery 2:
* - New NN starts recovery and only talks to JN1 and JN2. JN0 has
* crashed. Since they have no logs open, they say they don't need
* recovery.
* - Starts writing segment 101, and writes 50 transactions before crashing.
*
* Recovery 3:
* - JN0 has come back to life.
* - New NN starts recovery and talks to all three. All three have
* segments open from txid 101, so it calls prepareRecovery(101)
* - JN0 has an already-accepted value for segment 101, so it replies
* "you should recover 101-101"
* - Former incorrect behavior: NN truncates logs to txid 101 even though
* it should have recovered through 150.
*
* In this case, even though there is an accepted recovery decision,
* the newer log segments should take precedence, since they were written
* in a newer epoch than the recorded decision.
*/
@Test
public void testNewerVersionOfSegmentWins() throws Exception {
setupEdgeCaseOneJnHasSegmentWithAcceptedRecovery();
// Now start writing again without JN0 present:
cluster.getJournalNode(0).stopAndJoin(0);
qjm = createSpyingQJM();
try {
assertEquals(100, QJMTestUtil.recoverAndReturnLastTxn(qjm));
// Write segment but do not finalize
writeSegment(cluster, qjm, 101, 50, false);
} finally {
qjm.close();
}
// Now try to recover a new writer, with JN0 present,
// and ensure that all of the above-written transactions are recovered.
cluster.restartJournalNode(0);
qjm = createSpyingQJM();
try {
assertEquals(150, QJMTestUtil.recoverAndReturnLastTxn(qjm));
} finally {
qjm.close();
}
}
/**
* Test another edge case discovered by randomized testing.
*
* Starts with the edge case state set up by
* {@link #setupEdgeCaseOneJnHasSegmentWithAcceptedRecovery()}
*
* Recovery 2:
* - New NN starts recovery and only talks to JN1 and JN2. JN0 has
* crashed. Since they have no logs open, they say they don't need
* recovery.
* - Before writing any transactions, JN0 comes back to life and
* JN1 crashes.
* - Starts writing segment 101, and writes 50 transactions before crashing.
*
* Recovery 3:
* - JN1 has come back to life. JN2 crashes.
* - New NN starts recovery and talks to all three. All three have
* segments open from txid 101, so it calls prepareRecovery(101)
* - JN0 has an already-accepted value for segment 101, so it replies
* "you should recover 101-101"
* - Former incorrect behavior: NN truncates logs to txid 101 even though
* it should have recovered through 150.
*
* In this case, even though there is an accepted recovery decision,
* the newer log segments should take precedence, since they were written
* in a newer epoch than the recorded decision.
*/
@Test
public void testNewerVersionOfSegmentWins2() throws Exception {
setupEdgeCaseOneJnHasSegmentWithAcceptedRecovery();
// Recover without JN0 present.
cluster.getJournalNode(0).stopAndJoin(0);
qjm = createSpyingQJM();
try {
assertEquals(100, QJMTestUtil.recoverAndReturnLastTxn(qjm));
// After recovery, JN0 comes back to life and JN1 crashes.
cluster.restartJournalNode(0);
cluster.getJournalNode(1).stopAndJoin(0);
// Write segment but do not finalize
writeSegment(cluster, qjm, 101, 50, false);
} finally {
qjm.close();
}
// State:
// JN0: 1-100 finalized, 101_inprogress (txns up to 150)
// Previously, JN0 had an accepted recovery 101-101 from an earlier recovery
// attempt.
// JN1: 1-100 finalized
// JN2: 1-100 finalized, 101_inprogress (txns up to 150)
// We need to test that the accepted recovery 101-101 on JN0 doesn't
// end up truncating the log back to 101.
cluster.restartJournalNode(1);
cluster.getJournalNode(2).stopAndJoin(0);
qjm = createSpyingQJM();
try {
assertEquals(150, QJMTestUtil.recoverAndReturnLastTxn(qjm));
} finally {
qjm.close();
}
}
@Test(timeout=20000)
public void testCrashBetweenSyncLogAndPersistPaxosData() throws Exception {
JournalFaultInjector faultInjector =
JournalFaultInjector.instance = Mockito.mock(JournalFaultInjector.class);
setupLoggers345();
// Run recovery where the client only talks to JN0, JN1, such that it
// decides that the correct length is through txid 4.
// Only allow it to call acceptRecovery() on JN0.
qjm = createSpyingQJM();
spies = qjm.getLoggerSetForTests().getLoggersForTests();
cluster.getJournalNode(2).stopAndJoin(0);
injectIOE().when(spies.get(1)).acceptRecovery(
Mockito.<SegmentStateProto>any(), Mockito.<URL>any());
tryRecoveryExpectingFailure();
cluster.restartJournalNode(2);
// State at this point:
// JN0: edit log for 1-4, paxos recovery data for txid 4
// JN1: edit log for 1-4,
// JN2: edit log for 1-5
// Run recovery again, but don't allow JN0 to respond to the
// prepareRecovery() call. This will cause recovery to decide
// on txid 5.
// Additionally, crash all of the nodes before they persist
// any new paxos data.
qjm = createSpyingQJM();
spies = qjm.getLoggerSetForTests().getLoggersForTests();
injectIOE().when(spies.get(0)).prepareRecovery(Mockito.eq(1L));
Mockito.doThrow(new IOException("Injected")).when(faultInjector)
.beforePersistPaxosData();
tryRecoveryExpectingFailure();
Mockito.reset(faultInjector);
// State at this point:
// JN0: edit log for 1-5, paxos recovery data for txid 4
// !!! This is the interesting bit, above. The on-disk data and the
// paxos data don't match up!
// JN1: edit log for 1-5,
// JN2: edit log for 1-5,
// Now, stop JN2, and see if we can still start up even though
// JN0 is in a strange state where its log data is actually newer
// than its accepted Paxos state.
cluster.getJournalNode(2).stopAndJoin(0);
qjm = createSpyingQJM();
try {
long recovered = QJMTestUtil.recoverAndReturnLastTxn(qjm);
assertTrue(recovered >= 4); // 4 was committed to a quorum
} finally {
qjm.close();
}
}
private void tryRecoveryExpectingFailure() throws IOException {
try {
QJMTestUtil.recoverAndReturnLastTxn(qjm);
fail("Expected to fail recovery");
} catch (QuorumException qe) {
GenericTestUtils.assertExceptionContains("Injected", qe);
} finally {
qjm.close();
}
}
private Stubber injectIOE() {
return futureThrows(new IOException("Injected"));
}
@Test
public void testPurgeLogs() throws Exception {
for (int txid = 1; txid <= 5; txid++) {
writeSegment(cluster, qjm, txid, 1, true);
}
File curDir = cluster.getCurrentDir(0, JID);
GenericTestUtils.assertGlobEquals(curDir, "edits_.*",
NNStorage.getFinalizedEditsFileName(1, 1),
NNStorage.getFinalizedEditsFileName(2, 2),
NNStorage.getFinalizedEditsFileName(3, 3),
NNStorage.getFinalizedEditsFileName(4, 4),
NNStorage.getFinalizedEditsFileName(5, 5));
File paxosDir = new File(curDir, "paxos");
GenericTestUtils.assertExists(paxosDir);
// Create new files in the paxos directory, which should get purged too.
assertTrue(new File(paxosDir, "1").createNewFile());
assertTrue(new File(paxosDir, "3").createNewFile());
GenericTestUtils.assertGlobEquals(paxosDir, "\\d+",
"1", "3");
// Create some temporary files of the sort that are used during recovery.
assertTrue(new File(curDir,
"edits_inprogress_0000000000000000001.epoch=140").createNewFile());
assertTrue(new File(curDir,
"edits_inprogress_0000000000000000002.empty").createNewFile());
qjm.purgeLogsOlderThan(3);
// Log purging is asynchronous, so we have to wait for the calls
// to be sent and respond before verifying.
waitForAllPendingCalls(qjm.getLoggerSetForTests());
// Older edits should be purged
GenericTestUtils.assertGlobEquals(curDir, "edits_.*",
NNStorage.getFinalizedEditsFileName(3, 3),
NNStorage.getFinalizedEditsFileName(4, 4),
NNStorage.getFinalizedEditsFileName(5, 5));
// Older paxos files should be purged
GenericTestUtils.assertGlobEquals(paxosDir, "\\d+",
"3");
}
@Test
public void testToString() throws Exception {
GenericTestUtils.assertMatches(
qjm.toString(),
"QJM to \\[127.0.0.1:\\d+, 127.0.0.1:\\d+, 127.0.0.1:\\d+\\]");
}
@Test
public void testSelectInputStreamsNotOnBoundary() throws Exception {
final int txIdsPerSegment = 10;
for (int txid = 1; txid <= 5 * txIdsPerSegment; txid += txIdsPerSegment) {
writeSegment(cluster, qjm, txid, txIdsPerSegment, true);
}
File curDir = cluster.getCurrentDir(0, JID);
GenericTestUtils.assertGlobEquals(curDir, "edits_.*",
NNStorage.getFinalizedEditsFileName(1, 10),
NNStorage.getFinalizedEditsFileName(11, 20),
NNStorage.getFinalizedEditsFileName(21, 30),
NNStorage.getFinalizedEditsFileName(31, 40),
NNStorage.getFinalizedEditsFileName(41, 50));
ArrayList<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
qjm.selectInputStreams(streams, 25, false);
verifyEdits(streams, 25, 50);
}
private QuorumJournalManager createSpyingQJM()
throws IOException, URISyntaxException {
AsyncLogger.Factory spyFactory = new AsyncLogger.Factory() {
@Override
public AsyncLogger createLogger(Configuration conf, NamespaceInfo nsInfo,
String journalId, InetSocketAddress addr) {
AsyncLogger logger = new IPCLoggerChannel(conf, nsInfo, journalId, addr) {
          @Override
          protected ExecutorService createSingleThreadExecutor() {
// Don't parallelize calls to the quorum in the tests.
// This makes the tests more deterministic.
return MoreExecutors.sameThreadExecutor();
}
};
return Mockito.spy(logger);
}
};
return closeLater(new QuorumJournalManager(
conf, cluster.getQuorumJournalURI(JID), FAKE_NSINFO, spyFactory));
}
private static void waitForAllPendingCalls(AsyncLoggerSet als)
throws InterruptedException {
for (AsyncLogger l : als.getLoggersForTests()) {
IPCLoggerChannel ch = (IPCLoggerChannel)l;
ch.waitForAllPendingCalls();
}
}
private void checkRecovery(MiniJournalCluster cluster,
long segmentTxId, long expectedEndTxId)
throws IOException {
int numFinalized = 0;
for (int i = 0; i < cluster.getNumNodes(); i++) {
File logDir = cluster.getCurrentDir(i, JID);
EditLogFile elf = FileJournalManager.getLogFile(logDir, segmentTxId);
if (elf == null) {
continue;
}
if (!elf.isInProgress()) {
numFinalized++;
if (elf.getLastTxId() != expectedEndTxId) {
fail("File " + elf + " finalized to wrong txid, expected " +
expectedEndTxId);
}
}
}
if (numFinalized < cluster.getQuorumSize()) {
fail("Did not find a quorum of finalized logs starting at " +
segmentTxId);
}
}
}
| 34,810 | 34.19818 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.qjournal.client;
import static org.junit.Assert.*;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hdfs.qjournal.client.QuorumCall;
import org.junit.Test;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.SettableFuture;
public class TestQuorumCall {
@Test(timeout=10000)
public void testQuorums() throws Exception {
Map<String, SettableFuture<String>> futures = ImmutableMap.of(
"f1", SettableFuture.<String>create(),
"f2", SettableFuture.<String>create(),
"f3", SettableFuture.<String>create());
QuorumCall<String, String> q = QuorumCall.create(futures);
assertEquals(0, q.countResponses());
futures.get("f1").set("first future");
q.waitFor(1, 0, 0, 100000, "test"); // wait for 1 response
q.waitFor(0, 1, 0, 100000, "test"); // wait for 1 success
assertEquals(1, q.countResponses());
futures.get("f2").setException(new Exception("error"));
assertEquals(2, q.countResponses());
futures.get("f3").set("second future");
q.waitFor(3, 0, 100, 100000, "test"); // wait for 3 responses
q.waitFor(0, 2, 100, 100000, "test"); // 2 successes
assertEquals(3, q.countResponses());
assertEquals("f1=first future,f3=second future",
Joiner.on(",").withKeyValueSeparator("=").join(
new TreeMap<String, String>(q.getResults())));
try {
q.waitFor(0, 4, 100, 10, "test");
fail("Didn't time out waiting for more responses than came back");
} catch (TimeoutException te) {
// expected
}
}
}
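/**
 * Illustrative sketch (editor's addition): the wait-for-majority pattern
 * the API above supports -- issue one future per node, then block until a
 * strict majority succeed. Parameter meanings are inferred from the
 * assertions in the test above; names and timeout values are examples only.
 */
class QuorumCallUsageSketch {
  static void waitForMajority(Map<String, SettableFuture<String>> futures)
      throws Exception {
    QuorumCall<String, String> q = QuorumCall.create(futures);
    int majority = futures.size() / 2 + 1;
    // Mirrors the calls in the test above:
    // (minResponses, minSuccesses, maxExceptions, timeoutMs, operationName)
    q.waitFor(0, majority, 0, 10000, "example-op");
  }
}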
| 2,530 | 35.157143 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.qjournal.client;
import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.FAKE_NSINFO;
import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.JID;
import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.writeSegment;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.Closeable;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.net.InetSocketAddress;
import java.net.URISyntaxException;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.SortedSet;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
import org.apache.hadoop.hdfs.qjournal.QJMTestUtil;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
import org.apache.hadoop.hdfs.qjournal.server.JournalFaultInjector;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.util.Holder;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.MoreExecutors;
public class TestQJMWithFaults {
private static final Log LOG = LogFactory.getLog(
TestQJMWithFaults.class);
private static final String RAND_SEED_PROPERTY =
"TestQJMWithFaults.random-seed";
private static final int NUM_WRITER_ITERS = 500;
private static final int SEGMENTS_PER_WRITER = 2;
private static final Configuration conf = new Configuration();
static {
// Don't retry connections - it just slows down the tests.
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
// Make tests run faster by avoiding fsync()
EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
}
// Set up fault injection mock.
private static final JournalFaultInjector faultInjector =
JournalFaultInjector.instance = Mockito.mock(JournalFaultInjector.class);
/**
* Run through the creation of a log without any faults injected,
* and count how many RPCs are made to each node. This sets the
* bounds for the other test cases, so they can exhaustively explore
* the space of potential failures.
*/
private static long determineMaxIpcNumber() throws Exception {
Configuration conf = new Configuration();
MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf).build();
QuorumJournalManager qjm = null;
long ret;
try {
qjm = createInjectableQJM(cluster);
qjm.format(FAKE_NSINFO);
doWorkload(cluster, qjm);
SortedSet<Integer> ipcCounts = Sets.newTreeSet();
for (AsyncLogger l : qjm.getLoggerSetForTests().getLoggersForTests()) {
InvocationCountingChannel ch = (InvocationCountingChannel)l;
ch.waitForAllPendingCalls();
ipcCounts.add(ch.getRpcCount());
}
// All of the loggers should have sent the same number of RPCs, since there
// were no failures.
assertEquals(1, ipcCounts.size());
ret = ipcCounts.first();
LOG.info("Max IPC count = " + ret);
} finally {
IOUtils.closeStream(qjm);
cluster.shutdown();
}
return ret;
}
/**
* Sets up two of the nodes to each drop a single RPC, at all
* possible combinations of RPCs. This may result in the
* active writer failing to write. After this point, a new writer
* should be able to recover and continue writing without
* data loss.
*/
@Test
public void testRecoverAfterDoubleFailures() throws Exception {
final long MAX_IPC_NUMBER = determineMaxIpcNumber();
for (int failA = 1; failA <= MAX_IPC_NUMBER; failA++) {
for (int failB = 1; failB <= MAX_IPC_NUMBER; failB++) {
String injectionStr = "(" + failA + ", " + failB + ")";
LOG.info("\n\n-------------------------------------------\n" +
"Beginning test, failing at " + injectionStr + "\n" +
"-------------------------------------------\n\n");
MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf)
.build();
QuorumJournalManager qjm = null;
try {
qjm = createInjectableQJM(cluster);
qjm.format(FAKE_NSINFO);
List<AsyncLogger> loggers = qjm.getLoggerSetForTests().getLoggersForTests();
failIpcNumber(loggers.get(0), failA);
failIpcNumber(loggers.get(1), failB);
int lastAckedTxn = doWorkload(cluster, qjm);
if (lastAckedTxn < 6) {
LOG.info("Failed after injecting failures at " + injectionStr +
". This is expected since we injected a failure in the " +
"majority.");
}
qjm.close();
qjm = null;
// Now should be able to recover
qjm = createInjectableQJM(cluster);
long lastRecoveredTxn = QJMTestUtil.recoverAndReturnLastTxn(qjm);
assertTrue(lastRecoveredTxn >= lastAckedTxn);
writeSegment(cluster, qjm, lastRecoveredTxn + 1, 3, true);
} catch (Throwable t) {
// Test failure! Rethrow with the test setup info so it can be
// easily triaged.
throw new RuntimeException("Test failed with injection: " + injectionStr,
t);
} finally {
cluster.shutdown();
cluster = null;
IOUtils.closeStream(qjm);
qjm = null;
}
}
}
}
/**
* Test case in which three JournalNodes randomly flip flop between
* up and down states every time they get an RPC.
*
* The writer keeps track of the latest ACKed edit, and on every
* recovery operation, ensures that it recovers at least to that
* point or higher. Since at any given point, a majority of JNs
* may be injecting faults, any writer operation is allowed to fail,
* so long as the exception message indicates it failed due to injected
* faults.
*
* Given a random seed, the test should be entirely deterministic.
*/
@Test
public void testRandomized() throws Exception {
long seed;
Long userSpecifiedSeed = Long.getLong(RAND_SEED_PROPERTY);
if (userSpecifiedSeed != null) {
LOG.info("Using seed specified in system property");
seed = userSpecifiedSeed;
      // If the user specifies a seed, gather all the IPC trace
      // information so that debugging is easier. This tracing makes
      // the test run about 25% slower, so it is only enabled here.
((Log4JLogger)ProtobufRpcEngine.LOG).getLogger().setLevel(Level.ALL);
} else {
seed = new Random().nextLong();
}
LOG.info("Random seed: " + seed);
Random r = new Random(seed);
MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf)
.build();
// Format the cluster using a non-faulty QJM.
QuorumJournalManager qjmForInitialFormat =
createInjectableQJM(cluster);
qjmForInitialFormat.format(FAKE_NSINFO);
qjmForInitialFormat.close();
try {
long txid = 0;
long lastAcked = 0;
for (int i = 0; i < NUM_WRITER_ITERS; i++) {
LOG.info("Starting writer " + i + "\n-------------------");
QuorumJournalManager qjm = createRandomFaultyQJM(cluster, r);
try {
long recovered;
try {
recovered = QJMTestUtil.recoverAndReturnLastTxn(qjm);
} catch (Throwable t) {
LOG.info("Failed recovery", t);
checkException(t);
continue;
}
assertTrue("Recovered only up to txnid " + recovered +
" but had gotten an ack for " + lastAcked,
recovered >= lastAcked);
txid = recovered + 1;
// Periodically purge old data on disk so it's easier to look
// at failure cases.
if (txid > 100 && i % 10 == 1) {
qjm.purgeLogsOlderThan(txid - 100);
}
Holder<Throwable> thrown = new Holder<Throwable>(null);
for (int j = 0; j < SEGMENTS_PER_WRITER; j++) {
lastAcked = writeSegmentUntilCrash(cluster, qjm, txid, 4, thrown);
if (thrown.held != null) {
LOG.info("Failed write", thrown.held);
checkException(thrown.held);
break;
}
txid += 4;
}
} finally {
qjm.close();
}
}
} finally {
cluster.shutdown();
}
}
private void checkException(Throwable t) {
GenericTestUtils.assertExceptionContains("Injected", t);
if (t.toString().contains("AssertionError")) {
throw new RuntimeException("Should never see AssertionError in fault test!",
t);
}
}
private long writeSegmentUntilCrash(MiniJournalCluster cluster,
QuorumJournalManager qjm, long txid, int numTxns, Holder<Throwable> thrown) {
long firstTxId = txid;
long lastAcked = txid - 1;
try {
EditLogOutputStream stm = qjm.startLogSegment(txid,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
for (int i = 0; i < numTxns; i++) {
QJMTestUtil.writeTxns(stm, txid++, 1);
lastAcked++;
}
stm.close();
qjm.finalizeLogSegment(firstTxId, lastAcked);
} catch (Throwable t) {
thrown.held = t;
}
return lastAcked;
}
/**
* Run a simple workload of becoming the active writer and writing
* two log segments: 1-3 and 4-6.
*/
private static int doWorkload(MiniJournalCluster cluster,
QuorumJournalManager qjm) throws IOException {
int lastAcked = 0;
try {
qjm.recoverUnfinalizedSegments();
writeSegment(cluster, qjm, 1, 3, true);
lastAcked = 3;
writeSegment(cluster, qjm, 4, 3, true);
lastAcked = 6;
} catch (QuorumException qe) {
LOG.info("Failed to write at txid " + lastAcked,
qe);
}
return lastAcked;
}
/**
* Inject a failure at the given IPC number, such that the JN never
* receives the RPC. The client side sees an IOException. Future
* IPCs after this number will be received as usual.
*/
private void failIpcNumber(AsyncLogger logger, int idx) {
((InvocationCountingChannel)logger).failIpcNumber(idx);
}
private static class RandomFaultyChannel extends IPCLoggerChannel {
private final Random random;
private final float injectionProbability = 0.1f;
private boolean isUp = true;
public RandomFaultyChannel(Configuration conf, NamespaceInfo nsInfo,
String journalId, InetSocketAddress addr, long seed) {
super(conf, nsInfo, journalId, addr);
this.random = new Random(seed);
}
@Override
protected QJournalProtocol createProxy() throws IOException {
QJournalProtocol realProxy = super.createProxy();
return mockProxy(
new WrapEveryCall<Object>(realProxy) {
@Override
void beforeCall(InvocationOnMock invocation) throws Exception {
if (random.nextFloat() < injectionProbability) {
isUp = !isUp;
LOG.info("transitioned " + addr + " to " +
(isUp ? "up" : "down"));
}
if (!isUp) {
throw new IOException("Injected - faking being down");
}
if (invocation.getMethod().getName().equals("acceptRecovery")) {
if (random.nextFloat() < injectionProbability) {
Mockito.doThrow(new IOException(
"Injected - faking fault before persisting paxos data"))
.when(faultInjector).beforePersistPaxosData();
} else if (random.nextFloat() < injectionProbability) {
Mockito.doThrow(new IOException(
"Injected - faking fault after persisting paxos data"))
.when(faultInjector).afterPersistPaxosData();
}
}
}
@Override
public void afterCall(InvocationOnMock invocation, boolean succeeded) {
Mockito.reset(faultInjector);
}
});
}
@Override
protected ExecutorService createSingleThreadExecutor() {
return MoreExecutors.sameThreadExecutor();
}
}
private static class InvocationCountingChannel extends IPCLoggerChannel {
private int rpcCount = 0;
private final Map<Integer, Callable<Void>> injections = Maps.newHashMap();
public InvocationCountingChannel(Configuration conf, NamespaceInfo nsInfo,
String journalId, InetSocketAddress addr) {
super(conf, nsInfo, journalId, addr);
}
int getRpcCount() {
return rpcCount;
}
void failIpcNumber(final int idx) {
Preconditions.checkArgument(idx > 0,
"id must be positive");
inject(idx, new Callable<Void>() {
@Override
public Void call() throws Exception {
throw new IOException("injected failed IPC at " + idx);
}
});
}
private void inject(int beforeRpcNumber, Callable<Void> injectedCode) {
injections.put(beforeRpcNumber, injectedCode);
}
@Override
protected QJournalProtocol createProxy() throws IOException {
final QJournalProtocol realProxy = super.createProxy();
QJournalProtocol mock = mockProxy(
new WrapEveryCall<Object>(realProxy) {
            @Override
            void beforeCall(InvocationOnMock invocation) throws Exception {
rpcCount++;
String callStr = "[" + addr + "] " +
invocation.getMethod().getName() + "(" +
Joiner.on(", ").join(invocation.getArguments()) + ")";
Callable<Void> inject = injections.get(rpcCount);
if (inject != null) {
LOG.info("Injecting code before IPC #" + rpcCount + ": " +
callStr);
inject.call();
} else {
LOG.info("IPC call #" + rpcCount + ": " + callStr);
}
}
});
return mock;
}
}
private static QJournalProtocol mockProxy(WrapEveryCall<Object> wrapper)
throws IOException {
QJournalProtocol mock = Mockito.mock(QJournalProtocol.class,
Mockito.withSettings()
.defaultAnswer(wrapper)
.extraInterfaces(Closeable.class));
return mock;
}
private static abstract class WrapEveryCall<T> implements Answer<T> {
private final Object realObj;
WrapEveryCall(Object realObj) {
this.realObj = realObj;
}
@SuppressWarnings("unchecked")
@Override
public T answer(InvocationOnMock invocation) throws Throwable {
// Don't want to inject an error on close() since that isn't
// actually an IPC call!
if (!Closeable.class.equals(
invocation.getMethod().getDeclaringClass())) {
beforeCall(invocation);
}
boolean success = false;
try {
T ret = (T) invocation.getMethod().invoke(realObj,
invocation.getArguments());
success = true;
return ret;
} catch (InvocationTargetException ite) {
throw ite.getCause();
} finally {
afterCall(invocation, success);
}
}
abstract void beforeCall(InvocationOnMock invocation) throws Exception;
void afterCall(InvocationOnMock invocation, boolean succeeded) {}
}
private static QuorumJournalManager createInjectableQJM(MiniJournalCluster cluster)
throws IOException, URISyntaxException {
AsyncLogger.Factory spyFactory = new AsyncLogger.Factory() {
@Override
public AsyncLogger createLogger(Configuration conf, NamespaceInfo nsInfo,
String journalId, InetSocketAddress addr) {
return new InvocationCountingChannel(conf, nsInfo, journalId, addr);
}
};
return new QuorumJournalManager(conf, cluster.getQuorumJournalURI(JID),
FAKE_NSINFO, spyFactory);
}
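  /**
   * Create a QuorumJournalManager whose loggers are RandomFaultyChannels,
   * each seeded from the given Random so that the injected faults are
   * reproducible.
   */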
private static QuorumJournalManager createRandomFaultyQJM(
MiniJournalCluster cluster, final Random seedGenerator)
throws IOException, URISyntaxException {
AsyncLogger.Factory spyFactory = new AsyncLogger.Factory() {
@Override
public AsyncLogger createLogger(Configuration conf, NamespaceInfo nsInfo,
String journalId, InetSocketAddress addr) {
return new RandomFaultyChannel(conf, nsInfo, journalId, addr,
seedGenerator.nextLong());
}
};
return new QuorumJournalManager(conf, cluster.getQuorumJournalURI(JID),
FAKE_NSINFO, spyFactory);
}
}
| 18,385 | 34.700971 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.qjournal.client;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.eq;
import java.io.IOException;
import java.net.URI;
import java.util.List;
import org.junit.Assert;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.qjournal.client.AsyncLogger;
import org.apache.hadoop.hdfs.qjournal.client.QuorumException;
import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.stubbing.Stubber;
import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.writeOp;
/**
* True unit tests for QuorumJournalManager
*/
public class TestQuorumJournalManagerUnit {
static {
((Log4JLogger)QuorumJournalManager.LOG).getLogger().setLevel(Level.ALL);
}
private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
12345, "mycluster", "my-bp", 0L);
private final Configuration conf = new Configuration();
private List<AsyncLogger> spyLoggers;
private QuorumJournalManager qjm;
@Before
public void setup() throws Exception {
spyLoggers = ImmutableList.of(
mockLogger(),
mockLogger(),
mockLogger());
qjm = new QuorumJournalManager(conf, new URI("qjournal://host/jid"), FAKE_NSINFO) {
@Override
protected List<AsyncLogger> createLoggers(AsyncLogger.Factory factory) {
return spyLoggers;
}
};
for (AsyncLogger logger : spyLoggers) {
futureReturns(GetJournalStateResponseProto.newBuilder()
.setLastPromisedEpoch(0)
.setHttpPort(-1)
.build())
.when(logger).getJournalState();
futureReturns(
NewEpochResponseProto.newBuilder().build()
).when(logger).newEpoch(Mockito.anyLong());
futureReturns(null).when(logger).format(Mockito.<NamespaceInfo>any());
}
qjm.recoverUnfinalizedSegments();
}
private AsyncLogger mockLogger() {
return Mockito.mock(AsyncLogger.class);
}
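  /**
   * Stub a mocked logger method to return an immediately-completed future
   * with the given value, e.g.
   * futureReturns(null).when(logger).format(...).
   */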
static <V> Stubber futureReturns(V value) {
ListenableFuture<V> ret = Futures.immediateFuture(value);
return Mockito.doReturn(ret);
}
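  /**
   * Stub a mocked logger method to return a future that has already failed
   * with the given Throwable.
   */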
static Stubber futureThrows(Throwable t) {
ListenableFuture<?> ret = Futures.immediateFailedFuture(t);
return Mockito.doReturn(ret);
}
@Test
public void testAllLoggersStartOk() throws Exception {
futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong(),
Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
futureReturns(null).when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong(),
Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
futureReturns(null).when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong(),
Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
qjm.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
}
@Test
public void testQuorumOfLoggersStartOk() throws Exception {
futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong(),
Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
futureReturns(null).when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong(),
Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
futureThrows(new IOException("logger failed"))
.when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong(),
Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
qjm.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
}
@Test
public void testQuorumOfLoggersFail() throws Exception {
futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong(),
Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
futureThrows(new IOException("logger failed"))
.when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong(),
Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
futureThrows(new IOException("logger failed"))
.when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong(),
Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
try {
qjm.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
fail("Did not throw when quorum failed");
} catch (QuorumException qe) {
GenericTestUtils.assertExceptionContains("logger failed", qe);
}
}
@Test
public void testQuorumOutputStreamReport() throws Exception {
futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong(),
Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
futureReturns(null).when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong(),
Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
futureReturns(null).when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong(),
Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
QuorumOutputStream os = (QuorumOutputStream) qjm.startLogSegment(1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
String report = os.generateReport();
Assert.assertFalse("Report should be plain text", report.contains("<"));
}
@Test
public void testWriteEdits() throws Exception {
EditLogOutputStream stm = createLogSegment();
writeOp(stm, 1);
writeOp(stm, 2);
stm.setReadyToFlush();
writeOp(stm, 3);
// The flush should log txn 1-2
futureReturns(null).when(spyLoggers.get(0)).sendEdits(
anyLong(), eq(1L), eq(2), Mockito.<byte[]>any());
futureReturns(null).when(spyLoggers.get(1)).sendEdits(
anyLong(), eq(1L), eq(2), Mockito.<byte[]>any());
futureReturns(null).when(spyLoggers.get(2)).sendEdits(
anyLong(), eq(1L), eq(2), Mockito.<byte[]>any());
stm.flush();
// Another flush should now log txn #3
stm.setReadyToFlush();
futureReturns(null).when(spyLoggers.get(0)).sendEdits(
anyLong(), eq(3L), eq(1), Mockito.<byte[]>any());
futureReturns(null).when(spyLoggers.get(1)).sendEdits(
anyLong(), eq(3L), eq(1), Mockito.<byte[]>any());
futureReturns(null).when(spyLoggers.get(2)).sendEdits(
anyLong(), eq(3L), eq(1), Mockito.<byte[]>any());
stm.flush();
}
@Test
public void testWriteEditsOneSlow() throws Exception {
EditLogOutputStream stm = createLogSegment();
writeOp(stm, 1);
stm.setReadyToFlush();
// Make the first two logs respond immediately
futureReturns(null).when(spyLoggers.get(0)).sendEdits(
anyLong(), eq(1L), eq(1), Mockito.<byte[]>any());
futureReturns(null).when(spyLoggers.get(1)).sendEdits(
anyLong(), eq(1L), eq(1), Mockito.<byte[]>any());
    // And make the third log not respond
SettableFuture<Void> slowLog = SettableFuture.create();
Mockito.doReturn(slowLog).when(spyLoggers.get(2)).sendEdits(
anyLong(), eq(1L), eq(1), Mockito.<byte[]>any());
stm.flush();
Mockito.verify(spyLoggers.get(0)).setCommittedTxId(1L);
}
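  /**
   * Stub all three spy loggers to accept startLogSegment() and open a new
   * segment at txid 1 through the QJM.
   */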
private EditLogOutputStream createLogSegment() throws IOException {
futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong(),
Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
futureReturns(null).when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong(),
Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
futureReturns(null).when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong(),
Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
EditLogOutputStream stm = qjm.startLogSegment(1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
return stm;
}
}
| 9,243 | 39.017316 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.qjournal.client;
import static org.junit.Assert.*;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
import org.apache.hadoop.hdfs.qjournal.client.AsyncLogger;
import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
public class TestEpochsAreUnique {
private static final Log LOG = LogFactory.getLog(TestEpochsAreUnique.class);
private static final String JID = "testEpochsAreUnique-jid";
private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
12345, "mycluster", "my-bp", 0L);
private final Random r = new Random();
@Test
public void testSingleThreaded() throws IOException {
Configuration conf = new Configuration();
MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf).build();
URI uri = cluster.getQuorumJournalURI(JID);
QuorumJournalManager qjm = new QuorumJournalManager(
conf, uri, FAKE_NSINFO);
try {
qjm.format(FAKE_NSINFO);
} finally {
qjm.close();
}
try {
// With no failures or contention, epochs should increase one-by-one
for (int i = 0; i < 5; i++) {
qjm = new QuorumJournalManager(
conf, uri, FAKE_NSINFO);
try {
qjm.createNewUniqueEpoch();
assertEquals(i + 1, qjm.getLoggerSetForTests().getEpoch());
} finally {
qjm.close();
}
}
long prevEpoch = 5;
// With some failures injected, it should still always increase, perhaps
// skipping some
for (int i = 0; i < 20; i++) {
long newEpoch = -1;
while (true) {
qjm = new QuorumJournalManager(
conf, uri, FAKE_NSINFO, new FaultyLoggerFactory());
try {
qjm.createNewUniqueEpoch();
newEpoch = qjm.getLoggerSetForTests().getEpoch();
break;
} catch (IOException ioe) {
// It's OK to fail to create an epoch, since we randomly inject
// faults. It's possible we'll inject faults in too many of the
// underlying nodes, and a failure is expected in that case
} finally {
qjm.close();
}
}
LOG.info("Created epoch " + newEpoch);
assertTrue("New epoch " + newEpoch + " should be greater than previous " +
prevEpoch, newEpoch > prevEpoch);
prevEpoch = newEpoch;
}
} finally {
cluster.shutdown();
}
}
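  /**
   * Logger factory that wraps each IPC channel in a Mockito spy which fails
   * getJournalState() 10% of the time and newEpoch() 40% of the time.
   */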
private class FaultyLoggerFactory implements AsyncLogger.Factory {
@Override
public AsyncLogger createLogger(Configuration conf, NamespaceInfo nsInfo,
String journalId, InetSocketAddress addr) {
AsyncLogger ch = IPCLoggerChannel.FACTORY.createLogger(
conf, nsInfo, journalId, addr);
AsyncLogger spy = Mockito.spy(ch);
Mockito.doAnswer(new SometimesFaulty<Long>(0.10f))
.when(spy).getJournalState();
Mockito.doAnswer(new SometimesFaulty<Void>(0.40f))
.when(spy).newEpoch(Mockito.anyLong());
return spy;
}
}
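  /**
   * Mockito Answer that returns an immediately-failed future with the given
   * probability, and otherwise calls through to the real method.
   */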
private class SometimesFaulty<T> implements Answer<ListenableFuture<T>> {
private final float faultProbability;
public SometimesFaulty(float faultProbability) {
this.faultProbability = faultProbability;
}
@SuppressWarnings("unchecked")
@Override
public ListenableFuture<T> answer(InvocationOnMock invocation)
throws Throwable {
if (r.nextFloat() < faultProbability) {
return Futures.immediateFailedFuture(
new IOException("Injected fault"));
}
return (ListenableFuture<T>)invocation.callRealMethod();
}
}
}
| 5,010 | 33.558621 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.qjournal.client;
import static org.junit.Assert.*;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.qjournal.client.IPCLoggerChannel;
import org.apache.hadoop.hdfs.qjournal.client.LoggerTooFarBehindException;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import com.google.common.base.Supplier;
public class TestIPCLoggerChannel {
private static final Log LOG = LogFactory.getLog(
TestIPCLoggerChannel.class);
private final Configuration conf = new Configuration();
private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
12345, "mycluster", "my-bp", 0L);
private static final String JID = "test-journalid";
private static final InetSocketAddress FAKE_ADDR =
new InetSocketAddress(0);
private static final byte[] FAKE_DATA = new byte[4096];
private final QJournalProtocol mockProxy = Mockito.mock(QJournalProtocol.class);
private IPCLoggerChannel ch;
private static final int LIMIT_QUEUE_SIZE_MB = 1;
private static final int LIMIT_QUEUE_SIZE_BYTES =
LIMIT_QUEUE_SIZE_MB * 1024 * 1024;
@Before
public void setupMock() {
conf.setInt(DFSConfigKeys.DFS_QJOURNAL_QUEUE_SIZE_LIMIT_KEY,
LIMIT_QUEUE_SIZE_MB);
// Channel to the mock object instead of a real IPC proxy.
ch = new IPCLoggerChannel(conf, FAKE_NSINFO, JID, FAKE_ADDR) {
@Override
protected QJournalProtocol getProxy() throws IOException {
return mockProxy;
}
};
ch.setEpoch(1);
}
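  /**
   * Verify that a single sendEdits() call is forwarded to the mock proxy
   * as a journal() RPC with the expected arguments.
   */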
@Test
public void testSimpleCall() throws Exception {
ch.sendEdits(1, 1, 3, FAKE_DATA).get();
Mockito.verify(mockProxy).journal(Mockito.<RequestInfo>any(),
Mockito.eq(1L), Mockito.eq(1L),
Mockito.eq(3), Mockito.same(FAKE_DATA));
}
  /**
   * Test that, once the queue eclipses the configured size limit,
   * calls to journal more data are rejected.
   */
@Test
public void testQueueLimiting() throws Exception {
// Block the underlying fake proxy from actually completing any calls.
DelayAnswer delayer = new DelayAnswer(LOG);
Mockito.doAnswer(delayer).when(mockProxy).journal(
Mockito.<RequestInfo>any(),
Mockito.eq(1L), Mockito.eq(1L),
Mockito.eq(1), Mockito.same(FAKE_DATA));
// Queue up the maximum number of calls.
int numToQueue = LIMIT_QUEUE_SIZE_BYTES / FAKE_DATA.length;
for (int i = 1; i <= numToQueue; i++) {
ch.sendEdits(1L, (long)i, 1, FAKE_DATA);
}
// The accounting should show the correct total number queued.
assertEquals(LIMIT_QUEUE_SIZE_BYTES, ch.getQueuedEditsSize());
// Trying to queue any more should fail.
try {
ch.sendEdits(1L, numToQueue + 1, 1, FAKE_DATA).get(1, TimeUnit.SECONDS);
fail("Did not fail to queue more calls after queue was full");
} catch (ExecutionException ee) {
if (!(ee.getCause() instanceof LoggerTooFarBehindException)) {
throw ee;
}
}
delayer.proceed();
    // After we allow it to proceed, it should chug through the original queue
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return ch.getQueuedEditsSize() == 0;
}
}, 10, 1000);
}
  /**
   * Test that, if the remote node gets unsynchronized (e.g. some edits were
   * missed or the node rebooted), the client stops sending edits until
   * the next roll. Test for HDFS-3726.
   */
@Test
public void testStopSendingEditsWhenOutOfSync() throws Exception {
Mockito.doThrow(new IOException("injected error"))
.when(mockProxy).journal(
Mockito.<RequestInfo>any(),
Mockito.eq(1L), Mockito.eq(1L),
Mockito.eq(1), Mockito.same(FAKE_DATA));
try {
ch.sendEdits(1L, 1L, 1, FAKE_DATA).get();
fail("Injected JOOSE did not cause sendEdits() to throw");
} catch (ExecutionException ee) {
GenericTestUtils.assertExceptionContains("injected", ee);
}
Mockito.verify(mockProxy).journal(
Mockito.<RequestInfo>any(),
Mockito.eq(1L), Mockito.eq(1L),
Mockito.eq(1), Mockito.same(FAKE_DATA));
assertTrue(ch.isOutOfSync());
try {
ch.sendEdits(1L, 2L, 1, FAKE_DATA).get();
fail("sendEdits() should throw until next roll");
} catch (ExecutionException ee) {
GenericTestUtils.assertExceptionContains("disabled until next roll",
ee.getCause());
}
    // It should have failed without even sending the edits, since the
    // channel was out of sync.
Mockito.verify(mockProxy, Mockito.never()).journal(
Mockito.<RequestInfo>any(),
Mockito.eq(1L), Mockito.eq(2L),
Mockito.eq(1), Mockito.same(FAKE_DATA));
// It should have sent a heartbeat instead.
Mockito.verify(mockProxy).heartbeat(
Mockito.<RequestInfo>any());
// After a roll, sending new edits should not fail.
ch.startLogSegment(3L, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
assertFalse(ch.isOutOfSync());
ch.sendEdits(3L, 3L, 1, FAKE_DATA).get();
}
}
| 6,560 | 35.049451 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.qjournal.server;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.qjournal.QJMTestUtil;
import org.apache.hadoop.hdfs.qjournal.protocol.JournalOutOfSyncException;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProtoOrBuilder;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
public class TestJournal {
private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
12345, "mycluster", "my-bp", 0L);
private static final NamespaceInfo FAKE_NSINFO_2 = new NamespaceInfo(
6789, "mycluster", "my-bp", 0L);
private static final String JID = "test-journal";
private static final File TEST_LOG_DIR = new File(
new File(MiniDFSCluster.getBaseDirectory()), "TestJournal");
private final StorageErrorReporter mockErrorReporter = Mockito.mock(
StorageErrorReporter.class);
private Configuration conf;
private Journal journal;
@Before
public void setup() throws Exception {
FileUtil.fullyDelete(TEST_LOG_DIR);
conf = new Configuration();
journal = new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
mockErrorReporter);
journal.format(FAKE_NSINFO);
}
@After
public void verifyNoStorageErrors() throws Exception{
Mockito.verify(mockErrorReporter, Mockito.never())
.reportErrorOnFile(Mockito.<File>any());
}
@After
public void cleanup() {
IOUtils.closeStream(journal);
}
  /**
   * Test whether JNs can correctly handle an edit log that cannot be decoded.
   */
@Test
public void testScanEditLog() throws Exception {
// use a future layout version
journal.startLogSegment(makeRI(1), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1);
    // In the segment we write garbage edit log data, which can be scanned
    // but cannot be decoded
    final int numTxns = 5;
    byte[] ops = QJMTestUtil.createGabageTxns(1, numTxns);
journal.journal(makeRI(2), 1, 1, numTxns, ops);
// verify the in-progress editlog segment
SegmentStateProto segmentState = journal.getSegmentInfo(1);
assertTrue(segmentState.getIsInProgress());
Assert.assertEquals(numTxns, segmentState.getEndTxId());
Assert.assertEquals(1, segmentState.getStartTxId());
// finalize the segment and verify it again
journal.finalizeLogSegment(makeRI(3), 1, numTxns);
segmentState = journal.getSegmentInfo(1);
assertFalse(segmentState.getIsInProgress());
Assert.assertEquals(numTxns, segmentState.getEndTxId());
Assert.assertEquals(1, segmentState.getStartTxId());
}
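  /**
   * Check that epoch promises are monotonically increasing and that
   * requests from a previous epoch are rejected.
   */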
@Test (timeout = 10000)
public void testEpochHandling() throws Exception {
assertEquals(0, journal.getLastPromisedEpoch());
NewEpochResponseProto newEpoch =
journal.newEpoch(FAKE_NSINFO, 1);
assertFalse(newEpoch.hasLastSegmentTxId());
assertEquals(1, journal.getLastPromisedEpoch());
journal.newEpoch(FAKE_NSINFO, 3);
assertFalse(newEpoch.hasLastSegmentTxId());
assertEquals(3, journal.getLastPromisedEpoch());
try {
journal.newEpoch(FAKE_NSINFO, 3);
fail("Should have failed to promise same epoch twice");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"Proposed epoch 3 <= last promise 3", ioe);
}
try {
journal.startLogSegment(makeRI(1), 12345L,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
fail("Should have rejected call from prior epoch");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"epoch 1 is less than the last promised epoch 3", ioe);
}
try {
journal.journal(makeRI(1), 12345L, 100L, 0, new byte[0]);
fail("Should have rejected call from prior epoch");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"epoch 1 is less than the last promised epoch 3", ioe);
}
}
@Test (timeout = 10000)
public void testMaintainCommittedTxId() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
journal.startLogSegment(makeRI(1), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
// Send txids 1-3, with a request indicating only 0 committed
journal.journal(new RequestInfo(JID, 1, 2, 0), 1, 1, 3,
QJMTestUtil.createTxnData(1, 3));
assertEquals(0, journal.getCommittedTxnIdForTests());
// Send 4-6, with request indicating that through 3 is committed.
journal.journal(new RequestInfo(JID, 1, 3, 3), 1, 4, 3,
QJMTestUtil.createTxnData(4, 6));
assertEquals(3, journal.getCommittedTxnIdForTests());
}
@Test (timeout = 10000)
public void testRestartJournal() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
journal.startLogSegment(makeRI(1), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.journal(makeRI(2), 1, 1, 2,
QJMTestUtil.createTxnData(1, 2));
// Don't finalize.
String storageString = journal.getStorage().toColonSeparatedString();
System.err.println("storage string: " + storageString);
journal.close(); // close to unlock the storage dir
// Now re-instantiate, make sure history is still there
journal = new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
mockErrorReporter);
// The storage info should be read, even if no writer has taken over.
assertEquals(storageString,
journal.getStorage().toColonSeparatedString());
assertEquals(1, journal.getLastPromisedEpoch());
NewEpochResponseProtoOrBuilder newEpoch = journal.newEpoch(FAKE_NSINFO, 2);
assertEquals(1, newEpoch.getLastSegmentTxId());
}
@Test (timeout = 10000)
public void testFormatResetsCachedValues() throws Exception {
journal.newEpoch(FAKE_NSINFO, 12345L);
journal.startLogSegment(new RequestInfo(JID, 12345L, 1L, 0L), 1L,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
assertEquals(12345L, journal.getLastPromisedEpoch());
assertEquals(12345L, journal.getLastWriterEpoch());
assertTrue(journal.isFormatted());
// Close the journal in preparation for reformatting it.
journal.close();
journal.format(FAKE_NSINFO_2);
assertEquals(0, journal.getLastPromisedEpoch());
assertEquals(0, journal.getLastWriterEpoch());
assertTrue(journal.isFormatted());
}
/**
* Test that, if the writer crashes at the very beginning of a segment,
* before any transactions are written, that the next newEpoch() call
* returns the prior segment txid as its most recent segment.
*/
@Test (timeout = 10000)
public void testNewEpochAtBeginningOfSegment() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
journal.startLogSegment(makeRI(1), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.journal(makeRI(2), 1, 1, 2,
QJMTestUtil.createTxnData(1, 2));
journal.finalizeLogSegment(makeRI(3), 1, 2);
journal.startLogSegment(makeRI(4), 3,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
NewEpochResponseProto resp = journal.newEpoch(FAKE_NSINFO, 2);
assertEquals(1, resp.getLastSegmentTxId());
}
@Test (timeout = 10000)
public void testJournalLocking() throws Exception {
Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
StorageDirectory sd = journal.getStorage().getStorageDir(0);
File lockFile = new File(sd.getRoot(), Storage.STORAGE_FILE_LOCK);
// Journal should be locked, since the format() call locks it.
GenericTestUtils.assertExists(lockFile);
journal.newEpoch(FAKE_NSINFO, 1);
try {
new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
mockErrorReporter);
fail("Did not fail to create another journal in same dir");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"Cannot lock storage", ioe);
}
journal.close();
// Journal should no longer be locked after the close() call.
// Hence, should be able to create a new Journal in the same dir.
Journal journal2 = new Journal(conf, TEST_LOG_DIR, JID,
StartupOption.REGULAR, mockErrorReporter);
journal2.newEpoch(FAKE_NSINFO, 2);
journal2.close();
}
/**
* Test finalizing a segment after some batch of edits were missed.
* This should fail, since we validate the log before finalization.
*/
@Test (timeout = 10000)
public void testFinalizeWhenEditsAreMissed() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
journal.startLogSegment(makeRI(1), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.journal(makeRI(2), 1, 1, 3,
QJMTestUtil.createTxnData(1, 3));
// Try to finalize up to txn 6, even though we only wrote up to txn 3.
try {
journal.finalizeLogSegment(makeRI(3), 1, 6);
fail("did not fail to finalize");
} catch (JournalOutOfSyncException e) {
GenericTestUtils.assertExceptionContains(
"but only written up to txid 3", e);
}
// Check that, even if we re-construct the journal by scanning the
// disk, we don't allow finalizing incorrectly.
journal.close();
journal = new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
mockErrorReporter);
try {
journal.finalizeLogSegment(makeRI(4), 1, 6);
fail("did not fail to finalize");
} catch (JournalOutOfSyncException e) {
GenericTestUtils.assertExceptionContains(
"disk only contains up to txid 3", e);
}
}
/**
* Ensure that finalizing a segment which doesn't exist throws the
* appropriate exception.
*/
@Test (timeout = 10000)
public void testFinalizeMissingSegment() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
try {
journal.finalizeLogSegment(makeRI(1), 1000, 1001);
fail("did not fail to finalize");
} catch (JournalOutOfSyncException e) {
GenericTestUtils.assertExceptionContains(
"No log file to finalize at transaction ID 1000", e);
}
}
/**
* Assume that a client is writing to a journal, but loses its connection
* in the middle of a segment. Thus, any future journal() calls in that
* segment may fail, because some txns were missed while the connection was
* down.
*
* Eventually, the connection comes back, and the NN tries to start a new
* segment at a higher txid. This should abort the old one and succeed.
*/
@Test (timeout = 10000)
public void testAbortOldSegmentIfFinalizeIsMissed() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
// Start a segment at txid 1, and write a batch of 3 txns.
journal.startLogSegment(makeRI(1), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.journal(makeRI(2), 1, 1, 3,
QJMTestUtil.createTxnData(1, 3));
GenericTestUtils.assertExists(
journal.getStorage().getInProgressEditLog(1));
    // Try to start a new segment at txid 6. This should abort the old
    // segment and then succeed, allowing us to write txids 6-8.
journal.startLogSegment(makeRI(3), 6,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.journal(makeRI(4), 6, 6, 3,
QJMTestUtil.createTxnData(6, 3));
// The old segment should *not* be finalized.
GenericTestUtils.assertExists(
journal.getStorage().getInProgressEditLog(1));
GenericTestUtils.assertExists(
journal.getStorage().getInProgressEditLog(6));
}
/**
* Test behavior of startLogSegment() when a segment with the
* same transaction ID already exists.
*/
@Test (timeout = 10000)
public void testStartLogSegmentWhenAlreadyExists() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
// Start a segment at txid 1, and write just 1 transaction. This
// would normally be the START_LOG_SEGMENT transaction.
journal.startLogSegment(makeRI(1), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.journal(makeRI(2), 1, 1, 1,
QJMTestUtil.createTxnData(1, 1));
// Try to start new segment at txid 1, this should succeed, because
// we are allowed to re-start a segment if we only ever had the
// START_LOG_SEGMENT transaction logged.
journal.startLogSegment(makeRI(3), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.journal(makeRI(4), 1, 1, 1,
QJMTestUtil.createTxnData(1, 1));
// This time through, write more transactions afterwards, simulating
// real user transactions.
journal.journal(makeRI(5), 1, 2, 3,
QJMTestUtil.createTxnData(2, 3));
try {
journal.startLogSegment(makeRI(6), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
fail("Did not fail to start log segment which would overwrite " +
"an existing one");
} catch (IllegalStateException ise) {
GenericTestUtils.assertExceptionContains(
"seems to contain valid transactions", ise);
}
journal.finalizeLogSegment(makeRI(7), 1, 4);
// Ensure that we cannot overwrite a finalized segment
try {
journal.startLogSegment(makeRI(8), 1,
NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
fail("Did not fail to start log segment which would overwrite " +
"an existing one");
} catch (IllegalStateException ise) {
GenericTestUtils.assertExceptionContains(
"have a finalized segment", ise);
}
}
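  /**
   * Build a RequestInfo for the test journal with epoch 1, the given IPC
   * serial number, and a committed txid of 0.
   */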
private static RequestInfo makeRI(int serial) {
return new RequestInfo(JID, 1, serial, 0);
}
@Test (timeout = 10000)
public void testNamespaceVerification() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
try {
journal.newEpoch(FAKE_NSINFO_2, 2);
fail("Did not fail newEpoch() when namespaces mismatched");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"Incompatible namespaceID", ioe);
}
}
}
| 15,868 | 36.693587 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.qjournal.server;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.HashMap;
import java.util.Map;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mortbay.util.ajax.JSON;
/**
* Test {@link JournalNodeMXBean}
*/
public class TestJournalNodeMXBean {
private static final String NAMESERVICE = "ns1";
private static final int NUM_JN = 1;
private MiniJournalCluster jCluster;
private JournalNode jn;
@Before
public void setup() throws IOException {
// start 1 journal node
jCluster = new MiniJournalCluster.Builder(new Configuration()).format(true)
.numJournalNodes(NUM_JN).build();
jn = jCluster.getJournalNode(0);
}
@After
public void cleanup() throws IOException {
if (jCluster != null) {
jCluster.shutdown();
}
}
@Test
public void testJournalNodeMXBean() throws Exception {
// we have not formatted the journals yet, and the journal status in jmx
// should be empty since journal objects are created lazily
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=JournalNode,name=JournalNodeInfo");
// getJournalsStatus
String journalStatus = (String) mbs.getAttribute(mxbeanName,
"JournalsStatus");
assertEquals(jn.getJournalsStatus(), journalStatus);
assertFalse(journalStatus.contains(NAMESERVICE));
// format the journal ns1
final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(12345, "mycluster",
"my-bp", 0L);
jn.getOrCreateJournal(NAMESERVICE).format(FAKE_NSINFO);
// check again after format
// getJournalsStatus
journalStatus = (String) mbs.getAttribute(mxbeanName, "JournalsStatus");
assertEquals(jn.getJournalsStatus(), journalStatus);
Map<String, Map<String, String>> jMap = new HashMap<String, Map<String, String>>();
Map<String, String> infoMap = new HashMap<String, String>();
infoMap.put("Formatted", "true");
jMap.put(NAMESERVICE, infoMap);
assertEquals(JSON.toString(jMap), journalStatus);
// restart journal node without formatting
jCluster = new MiniJournalCluster.Builder(new Configuration()).format(false)
.numJournalNodes(NUM_JN).build();
jn = jCluster.getJournalNode(0);
// re-check
journalStatus = (String) mbs.getAttribute(mxbeanName, "JournalsStatus");
assertEquals(jn.getJournalsStatus(), journalStatus);
jMap = new HashMap<String, Map<String, String>>();
infoMap = new HashMap<String, String>();
infoMap.put("Formatted", "true");
jMap.put(NAMESERVICE, infoMap);
assertEquals(JSON.toString(jMap), journalStatus);
}
}
| 3,893 | 35.055556 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.qjournal.server;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.qjournal.QJMTestUtil;
import org.apache.hadoop.hdfs.qjournal.client.IPCLoggerChannel;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.MetricsAsserts;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StopWatch;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Charsets;
import com.google.common.primitives.Bytes;
import com.google.common.primitives.Ints;
public class TestJournalNode {
private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
12345, "mycluster", "my-bp", 0L);
private static final File TEST_BUILD_DATA = PathUtils.getTestDir(TestJournalNode.class);
private JournalNode jn;
private Journal journal;
private final Configuration conf = new Configuration();
private IPCLoggerChannel ch;
private String journalId;
static {
// Avoid an error when we double-initialize JvmMetrics
DefaultMetricsSystem.setMiniClusterMode(true);
}
@Before
public void setup() throws Exception {
File editsDir = new File(MiniDFSCluster.getBaseDirectory() +
File.separator + "TestJournalNode");
FileUtil.fullyDelete(editsDir);
conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
editsDir.getAbsolutePath());
conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY,
"0.0.0.0:0");
jn = new JournalNode();
jn.setConf(conf);
jn.start();
journalId = "test-journalid-" + GenericTestUtils.uniqueSequenceId();
journal = jn.getOrCreateJournal(journalId);
journal.format(FAKE_NSINFO);
ch = new IPCLoggerChannel(conf, FAKE_NSINFO, journalId, jn.getBoundIpcAddress());
}
@After
public void teardown() throws Exception {
jn.stop(0);
}
@Test(timeout=100000)
public void testJournal() throws Exception {
MetricsRecordBuilder metrics = MetricsAsserts.getMetrics(
journal.getMetricsForTests().getName());
MetricsAsserts.assertCounter("BatchesWritten", 0L, metrics);
MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 0L, metrics);
MetricsAsserts.assertGauge("CurrentLagTxns", 0L, metrics);
IPCLoggerChannel ch = new IPCLoggerChannel(
conf, FAKE_NSINFO, journalId, jn.getBoundIpcAddress());
ch.newEpoch(1).get();
ch.setEpoch(1);
ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
ch.sendEdits(1L, 1, 1, "hello".getBytes(Charsets.UTF_8)).get();
metrics = MetricsAsserts.getMetrics(
journal.getMetricsForTests().getName());
MetricsAsserts.assertCounter("BatchesWritten", 1L, metrics);
MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 0L, metrics);
MetricsAsserts.assertGauge("CurrentLagTxns", 0L, metrics);
ch.setCommittedTxId(100L);
ch.sendEdits(1L, 2, 1, "goodbye".getBytes(Charsets.UTF_8)).get();
metrics = MetricsAsserts.getMetrics(
journal.getMetricsForTests().getName());
MetricsAsserts.assertCounter("BatchesWritten", 2L, metrics);
MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 1L, metrics);
MetricsAsserts.assertGauge("CurrentLagTxns", 98L, metrics);
}
@Test(timeout=100000)
public void testReturnsSegmentInfoAtEpochTransition() throws Exception {
ch.newEpoch(1).get();
ch.setEpoch(1);
ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
ch.sendEdits(1L, 1, 2, QJMTestUtil.createTxnData(1, 2)).get();
// Switch to a new epoch without closing earlier segment
NewEpochResponseProto response = ch.newEpoch(2).get();
ch.setEpoch(2);
assertEquals(1, response.getLastSegmentTxId());
ch.finalizeLogSegment(1, 2).get();
// Switch to a new epoch after just closing the earlier segment.
response = ch.newEpoch(3).get();
ch.setEpoch(3);
assertEquals(1, response.getLastSegmentTxId());
// Start a segment but don't write anything, check newEpoch segment info
ch.startLogSegment(3, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
response = ch.newEpoch(4).get();
ch.setEpoch(4);
// Because the new segment is empty, it is equivalent to not having
// started writing it. Hence, we should return the prior segment txid.
assertEquals(1, response.getLastSegmentTxId());
}
@Test(timeout=100000)
public void testHttpServer() throws Exception {
String urlRoot = jn.getHttpServerURI();
// Check default servlets.
String pageContents = DFSTestUtil.urlGet(new URL(urlRoot + "/jmx"));
assertTrue("Bad contents: " + pageContents,
pageContents.contains(
"Hadoop:service=JournalNode,name=JvmMetrics"));
// Create some edits on server side
byte[] EDITS_DATA = QJMTestUtil.createTxnData(1, 3);
IPCLoggerChannel ch = new IPCLoggerChannel(
conf, FAKE_NSINFO, journalId, jn.getBoundIpcAddress());
ch.newEpoch(1).get();
ch.setEpoch(1);
ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
ch.sendEdits(1L, 1, 3, EDITS_DATA).get();
ch.finalizeLogSegment(1, 3).get();
// Attempt to retrieve via HTTP, ensure we get the data back
// including the header we expected
byte[] retrievedViaHttp = DFSTestUtil.urlGetBytes(new URL(urlRoot +
"/getJournal?segmentTxId=1&jid=" + journalId));
byte[] expected = Bytes.concat(
Ints.toByteArray(HdfsServerConstants.NAMENODE_LAYOUT_VERSION),
(new byte[] { 0, 0, 0, 0 }), // layout flags section
EDITS_DATA);
assertArrayEquals(expected, retrievedViaHttp);
// Attempt to fetch a non-existent file, check that we get an
// error status code
URL badUrl = new URL(urlRoot + "/getJournal?segmentTxId=12345&jid=" + journalId);
HttpURLConnection connection = (HttpURLConnection)badUrl.openConnection();
try {
assertEquals(404, connection.getResponseCode());
} finally {
connection.disconnect();
}
}
/**
* Test that the JournalNode performs correctly as a Paxos
* <em>Acceptor</em> process.
*/
@Test(timeout=100000)
public void testAcceptRecoveryBehavior() throws Exception {
// We need to run newEpoch() first, or else we have no way to distinguish
// different proposals for the same decision.
try {
ch.prepareRecovery(1L).get();
fail("Did not throw IllegalState when trying to run paxos without an epoch");
} catch (ExecutionException ise) {
GenericTestUtils.assertExceptionContains("bad epoch", ise);
}
ch.newEpoch(1).get();
ch.setEpoch(1);
// prepare() with no previously accepted value and no logs present
PrepareRecoveryResponseProto prep = ch.prepareRecovery(1L).get();
System.err.println("Prep: " + prep);
assertFalse(prep.hasAcceptedInEpoch());
assertFalse(prep.hasSegmentState());
// Make a log segment, and prepare again -- this time should see the
// segment existing.
ch.startLogSegment(1L, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
ch.sendEdits(1L, 1L, 1, QJMTestUtil.createTxnData(1, 1)).get();
prep = ch.prepareRecovery(1L).get();
System.err.println("Prep: " + prep);
assertFalse(prep.hasAcceptedInEpoch());
assertTrue(prep.hasSegmentState());
// accept() should save the accepted value in persistent storage
ch.acceptRecovery(prep.getSegmentState(), new URL("file:///dev/null")).get();
// So another prepare() call from a new epoch would return this value
ch.newEpoch(2);
ch.setEpoch(2);
prep = ch.prepareRecovery(1L).get();
assertEquals(1L, prep.getAcceptedInEpoch());
assertEquals(1L, prep.getSegmentState().getEndTxId());
// A prepare() or accept() call from an earlier epoch should now be rejected
ch.setEpoch(1);
try {
ch.prepareRecovery(1L).get();
fail("prepare from earlier epoch not rejected");
} catch (ExecutionException ioe) {
GenericTestUtils.assertExceptionContains(
"epoch 1 is less than the last promised epoch 2",
ioe);
}
try {
ch.acceptRecovery(prep.getSegmentState(), new URL("file:///dev/null")).get();
fail("accept from earlier epoch not rejected");
} catch (ExecutionException ioe) {
GenericTestUtils.assertExceptionContains(
"epoch 1 is less than the last promised epoch 2",
ioe);
}
}
@Test(timeout=100000)
public void testFailToStartWithBadConfig() throws Exception {
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, "non-absolute-path");
conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
assertJNFailsToStart(conf, "should be an absolute path");
// Existing file which is not a directory
File existingFile = new File(TEST_BUILD_DATA, "testjournalnodefile");
assertTrue(existingFile.createNewFile());
try {
conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
existingFile.getAbsolutePath());
assertJNFailsToStart(conf, "Not a directory");
} finally {
existingFile.delete();
}
// Directory which cannot be created
conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
Shell.WINDOWS ? "\\\\cannotBeCreated" : "/proc/does-not-exist");
assertJNFailsToStart(conf, "Cannot create directory");
}
private static void assertJNFailsToStart(Configuration conf,
String errString) {
    try {
      JournalNode jn = new JournalNode();
      jn.setConf(conf);
      jn.start();
      fail("JournalNode did not fail to start with bad config: " + errString);
    } catch (Exception e) {
      GenericTestUtils.assertExceptionContains(errString, e);
    }
}
/**
* Simple test of how fast the code path is to write edits.
* This isn't a true unit test, but can be run manually to
* check performance.
*
* At the time of development, this test ran in ~4sec on an
* SSD-enabled laptop (1.8ms/batch).
*/
@Test(timeout=100000)
public void testPerformance() throws Exception {
doPerfTest(8192, 1024); // 8MB
}
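  /**
   * Write a series of edit batches of editsSize bytes each through the
   * channel, then print the average time per batch and the overall
   * throughput.
   */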
private void doPerfTest(int editsSize, int numEdits) throws Exception {
byte[] data = new byte[editsSize];
ch.newEpoch(1).get();
ch.setEpoch(1);
ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
StopWatch sw = new StopWatch().start();
for (int i = 1; i < numEdits; i++) {
ch.sendEdits(1L, i, 1, data).get();
}
long time = sw.now(TimeUnit.MILLISECONDS);
System.err.println("Wrote " + numEdits + " batches of " + editsSize +
" bytes in " + time + "ms");
float avgRtt = (float)time/(float)numEdits;
long throughput = ((long)numEdits * editsSize * 1000L)/time;
System.err.println("Time per batch: " + avgRtt + "ms");
System.err.println("Throughput: " + throughput + " bytes/sec");
}
}
| 12,855 | 37.148368 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.mover;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.server.balancer.Dispatcher;
import org.apache.hadoop.hdfs.server.balancer.ExitStatus;
import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.io.IOUtils;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC;
/**
* Test the data migration tool (for Archival Storage)
*/
public class TestStorageMover {
static final Log LOG = LogFactory.getLog(TestStorageMover.class);
static {
((Log4JLogger)LogFactory.getLog(BlockPlacementPolicy.class)
).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(Dispatcher.class)
).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(DataTransferProtocol.class)).getLogger()
.setLevel(Level.ALL);
}
private static final int BLOCK_SIZE = 1024;
private static final short REPL = 3;
private static final int NUM_DATANODES = 6;
private static final Configuration DEFAULT_CONF = new HdfsConfiguration();
private static final BlockStoragePolicySuite DEFAULT_POLICIES;
private static final BlockStoragePolicy HOT;
private static final BlockStoragePolicy WARM;
private static final BlockStoragePolicy COLD;
static {
DEFAULT_CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
DEFAULT_CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
DEFAULT_CONF.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
2L);
DEFAULT_CONF.setLong(DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY, 2000L);
DEFAULT_POLICIES = BlockStoragePolicySuite.createDefaultSuite();
HOT = DEFAULT_POLICIES.getPolicy(HdfsConstants.HOT_STORAGE_POLICY_NAME);
WARM = DEFAULT_POLICIES.getPolicy(HdfsConstants.WARM_STORAGE_POLICY_NAME);
COLD = DEFAULT_POLICIES.getPolicy(HdfsConstants.COLD_STORAGE_POLICY_NAME);
TestBalancer.initTestSetup();
Dispatcher.setDelayAfterErrors(1000L);
}
/**
* This scheme defines files/directories and their block storage policies. It
* also defines snapshots.
*/
static class NamespaceScheme {
final List<Path> dirs;
final List<Path> files;
final long fileSize;
final Map<Path, List<String>> snapshotMap;
final Map<Path, BlockStoragePolicy> policyMap;
NamespaceScheme(List<Path> dirs, List<Path> files, long fileSize,
Map<Path,List<String>> snapshotMap,
Map<Path, BlockStoragePolicy> policyMap) {
this.dirs = dirs == null? Collections.<Path>emptyList(): dirs;
this.files = files == null? Collections.<Path>emptyList(): files;
this.fileSize = fileSize;
this.snapshotMap = snapshotMap == null ?
Collections.<Path, List<String>>emptyMap() : snapshotMap;
this.policyMap = policyMap;
}
/**
* Create files/directories/snapshots.
*/
void prepare(DistributedFileSystem dfs, short repl) throws Exception {
for (Path d : dirs) {
dfs.mkdirs(d);
}
for (Path file : files) {
DFSTestUtil.createFile(dfs, file, fileSize, repl, 0L);
}
for (Map.Entry<Path, List<String>> entry : snapshotMap.entrySet()) {
for (String snapshot : entry.getValue()) {
SnapshotTestHelper.createSnapshot(dfs, entry.getKey(), snapshot);
}
}
}
/**
* Set storage policies according to the corresponding scheme.
*/
void setStoragePolicy(DistributedFileSystem dfs) throws Exception {
for (Map.Entry<Path, BlockStoragePolicy> entry : policyMap.entrySet()) {
dfs.setStoragePolicy(entry.getKey(), entry.getValue().getName());
}
}
}
/**
* This scheme defines DataNodes and their storage, including storage types
* and remaining capacities.
*/
static class ClusterScheme {
final Configuration conf;
final int numDataNodes;
final short repl;
final StorageType[][] storageTypes;
final long[][] storageCapacities;
ClusterScheme() {
this(DEFAULT_CONF, NUM_DATANODES, REPL,
genStorageTypes(NUM_DATANODES), null);
}
ClusterScheme(Configuration conf, int numDataNodes, short repl,
StorageType[][] types, long[][] capacities) {
Preconditions.checkArgument(types == null || types.length == numDataNodes);
Preconditions.checkArgument(capacities == null || capacities.length ==
numDataNodes);
this.conf = conf;
this.numDataNodes = numDataNodes;
this.repl = repl;
this.storageTypes = types;
this.storageCapacities = capacities;
}
}
class MigrationTest {
private final ClusterScheme clusterScheme;
private final NamespaceScheme nsScheme;
private final Configuration conf;
private MiniDFSCluster cluster;
private DistributedFileSystem dfs;
private final BlockStoragePolicySuite policies;
MigrationTest(ClusterScheme cScheme, NamespaceScheme nsScheme) {
this.clusterScheme = cScheme;
this.nsScheme = nsScheme;
this.conf = clusterScheme.conf;
this.policies = DEFAULT_POLICIES;
}
/**
* Set up the cluster and start NameNode and DataNodes according to the
* corresponding scheme.
*/
void setupCluster() throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(clusterScheme
.numDataNodes).storageTypes(clusterScheme.storageTypes)
.storageCapacities(clusterScheme.storageCapacities).build();
cluster.waitActive();
dfs = cluster.getFileSystem();
}
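// The basic test flow: set up the cluster, populate the namespace, verify
// the initial layout, apply the storage policies, run the Mover, and verify
// the final layout.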
private void runBasicTest(boolean shutdown) throws Exception {
setupCluster();
try {
prepareNamespace();
verify(true);
setStoragePolicy();
migrate(ExitStatus.SUCCESS);
verify(true);
} finally {
if (shutdown) {
shutdownCluster();
}
}
}
void shutdownCluster() throws Exception {
IOUtils.cleanup(null, dfs);
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Create files/directories and set their storage policies according to the
* corresponding scheme.
*/
void prepareNamespace() throws Exception {
nsScheme.prepare(dfs, clusterScheme.repl);
}
void setStoragePolicy() throws Exception {
nsScheme.setStoragePolicy(dfs);
}
/**
* Run the migration tool.
*/
void migrate(ExitStatus expectedExitCode) throws Exception {
runMover(expectedExitCode);
Thread.sleep(5000); // let the NN finish deletion
}
/**
* Verify block locations after running the migration tool.
*/
void verify(boolean verifyAll) throws Exception {
for (DataNode dn : cluster.getDataNodes()) {
DataNodeTestUtils.triggerBlockReport(dn);
}
if (verifyAll) {
verifyNamespace();
}
}
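// Run the Mover over every name service in the configuration (all paths
// eligible for migration) and assert on its exit code.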
private void runMover(ExitStatus expectedExitCode) throws Exception {
Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
Map<URI, List<Path>> nnMap = Maps.newHashMap();
for (URI nn : namenodes) {
nnMap.put(nn, null);
}
int result = Mover.run(nnMap, conf);
Assert.assertEquals(expectedExitCode.getExitCode(), result);
}
private void verifyNamespace() throws Exception {
HdfsFileStatus status = dfs.getClient().getFileInfo("/");
verifyRecursively(null, status);
}
private void verifyRecursively(final Path parent,
final HdfsFileStatus status) throws Exception {
if (status.isDir()) {
Path fullPath = parent == null ?
new Path("/") : status.getFullPath(parent);
DirectoryListing children = dfs.getClient().listPaths(
fullPath.toString(), HdfsFileStatus.EMPTY_NAME, true);
for (HdfsFileStatus child : children.getPartialListing()) {
verifyRecursively(fullPath, child);
}
} else if (!status.isSymlink()) { // is file
verifyFile(parent, status, null);
}
}
void verifyFile(final Path file, final Byte expectedPolicyId)
throws Exception {
final Path parent = file.getParent();
DirectoryListing children = dfs.getClient().listPaths(
parent.toString(), HdfsFileStatus.EMPTY_NAME, true);
for (HdfsFileStatus child : children.getPartialListing()) {
if (child.getLocalName().equals(file.getName())) {
verifyFile(parent, child, expectedPolicyId);
return;
}
}
Assert.fail("File " + file + " not found.");
}
private void verifyFile(final Path parent, final HdfsFileStatus status,
final Byte expectedPolicyId) throws Exception {
HdfsLocatedFileStatus fileStatus = (HdfsLocatedFileStatus) status;
byte policyId = fileStatus.getStoragePolicy();
BlockStoragePolicy policy = policies.getPolicy(policyId);
if (expectedPolicyId != null) {
Assert.assertEquals((byte)expectedPolicyId, policy.getId());
}
final List<StorageType> types = policy.chooseStorageTypes(
status.getReplication());
for(LocatedBlock lb : fileStatus.getBlockLocations().getLocatedBlocks()) {
final Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types,
lb.getStorageTypes());
Assert.assertTrue(fileStatus.getFullName(parent.toString())
+ " with policy " + policy + " has non-empty overlap: " + diff
+ ", the corresponding block is " + lb.getBlock().getLocalBlock(),
diff.removeOverlap(true));
}
}
Replication getReplication(Path file) throws IOException {
return getOrVerifyReplication(file, null);
}
Replication verifyReplication(Path file, int expectedDiskCount,
int expectedArchiveCount) throws IOException {
final Replication r = new Replication();
r.disk = expectedDiskCount;
r.archive = expectedArchiveCount;
return getOrVerifyReplication(file, r);
}
private Replication getOrVerifyReplication(Path file, Replication expected)
throws IOException {
final List<LocatedBlock> lbs = dfs.getClient().getLocatedBlocks(
file.toString(), 0).getLocatedBlocks();
Assert.assertEquals(1, lbs.size());
LocatedBlock lb = lbs.get(0);
StringBuilder types = new StringBuilder();
final Replication r = new Replication();
for(StorageType t : lb.getStorageTypes()) {
types.append(t).append(", ");
if (t == StorageType.DISK) {
r.disk++;
} else if (t == StorageType.ARCHIVE) {
r.archive++;
} else {
Assert.fail("Unexpected storage type " + t);
}
}
if (expected != null) {
final String s = "file = " + file + "\n types = [" + types + "]";
Assert.assertEquals(s, expected, r);
}
return r;
}
}
static class Replication {
int disk;
int archive;
@Override
public int hashCode() {
return disk ^ archive;
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
} else if (!(obj instanceof Replication)) {
return false;
}
final Replication that = (Replication)obj;
return this.disk == that.disk && this.archive == that.archive;
}
@Override
public String toString() {
return "[disk=" + disk + ", archive=" + archive + "]";
}
}
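// Generate per-DataNode storage layouts: RAM_DISK+DISK nodes first, then
// all-DISK nodes, then all-ARCHIVE nodes, and DISK+ARCHIVE for the rest.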
private static StorageType[][] genStorageTypes(int numDataNodes) {
return genStorageTypes(numDataNodes, 0, 0, 0);
}
private static StorageType[][] genStorageTypes(int numDataNodes,
int numAllDisk, int numAllArchive) {
return genStorageTypes(numDataNodes, numAllDisk, numAllArchive, 0);
}
private static StorageType[][] genStorageTypes(int numDataNodes,
int numAllDisk, int numAllArchive, int numRamDisk) {
Preconditions.checkArgument(
(numAllDisk + numAllArchive + numRamDisk) <= numDataNodes);
StorageType[][] types = new StorageType[numDataNodes][];
int i = 0;
for (; i < numRamDisk; i++)
{
types[i] = new StorageType[]{StorageType.RAM_DISK, StorageType.DISK};
}
for (; i < numRamDisk + numAllDisk; i++) {
types[i] = new StorageType[]{StorageType.DISK, StorageType.DISK};
}
for (; i < numRamDisk + numAllDisk + numAllArchive; i++) {
types[i] = new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE};
}
for (; i < types.length; i++) {
types[i] = new StorageType[]{StorageType.DISK, StorageType.ARCHIVE};
}
return types;
}
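// Generate per-DataNode volume capacities matching the layout produced by
// genStorageTypes above.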
private static long[][] genCapacities(int nDatanodes, int numAllDisk,
int numAllArchive, int numRamDisk, long diskCapacity,
long archiveCapacity, long ramDiskCapacity) {
final long[][] capacities = new long[nDatanodes][];
int i = 0;
for (; i < numRamDisk; i++) {
capacities[i] = new long[]{ramDiskCapacity, diskCapacity};
}
for (; i < numRamDisk + numAllDisk; i++) {
capacities[i] = new long[]{diskCapacity, diskCapacity};
}
for (; i < numRamDisk + numAllDisk + numAllArchive; i++) {
capacities[i] = new long[]{archiveCapacity, archiveCapacity};
}
for(; i < capacities.length; i++) {
capacities[i] = new long[]{diskCapacity, archiveCapacity};
}
return capacities;
}
private static class PathPolicyMap {
final Map<Path, BlockStoragePolicy> map = Maps.newHashMap();
final Path hot = new Path("/hot");
final Path warm = new Path("/warm");
final Path cold = new Path("/cold");
final List<Path> files;
PathPolicyMap(int filesPerDir){
map.put(hot, HOT);
map.put(warm, WARM);
map.put(cold, COLD);
files = new ArrayList<Path>();
for(Path dir : map.keySet()) {
for(int i = 0; i < filesPerDir; i++) {
files.add(new Path(dir, "file" + i));
}
}
}
NamespaceScheme newNamespaceScheme() {
return new NamespaceScheme(Arrays.asList(hot, warm, cold),
files, BLOCK_SIZE/2, null, map);
}
/**
* Move hot files to warm and cold, warm files to hot and cold,
* and cold files to hot and warm.
*/
void moveAround(DistributedFileSystem dfs) throws Exception {
for(Path srcDir : map.keySet()) {
int i = 0;
for(Path dstDir : map.keySet()) {
if (!srcDir.equals(dstDir)) {
final Path src = new Path(srcDir, "file" + i++);
final Path dst = new Path(dstDir, srcDir.getName() + "2" + dstDir.getName());
LOG.info("rename " + src + " to " + dst);
dfs.rename(src, dst);
}
}
}
}
}
/**
* A normal case for Mover: move a file into archival storage
*/
@Test
public void testMigrateFileToArchival() throws Exception {
LOG.info("testMigrateFileToArchival");
final Path foo = new Path("/foo");
Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
policyMap.put(foo, COLD);
NamespaceScheme nsScheme = new NamespaceScheme(null, Arrays.asList(foo),
2*BLOCK_SIZE, null, policyMap);
ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
new MigrationTest(clusterScheme, nsScheme).runBasicTest(true);
}
/**
* Print a big banner in the test log to make debugging easier.
*/
static void banner(String string) {
LOG.info("\n\n\n\n================================================\n" +
string + "\n" +
"==================================================\n\n");
}
/**
* Run Mover with arguments specifying files and directories
*/
@Test
public void testMoveSpecificPaths() throws Exception {
LOG.info("testMoveSpecificPaths");
final Path foo = new Path("/foo");
final Path barFile = new Path(foo, "bar");
final Path foo2 = new Path("/foo2");
final Path bar2File = new Path(foo2, "bar2");
Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
policyMap.put(foo, COLD);
policyMap.put(foo2, WARM);
NamespaceScheme nsScheme = new NamespaceScheme(Arrays.asList(foo, foo2),
Arrays.asList(barFile, bar2File), BLOCK_SIZE, null, policyMap);
ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
test.setupCluster();
try {
test.prepareNamespace();
test.setStoragePolicy();
Map<URI, List<Path>> map = Mover.Cli.getNameNodePathsToMove(test.conf,
"-p", "/foo/bar", "/foo2");
int result = Mover.run(map, test.conf);
Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), result);
Thread.sleep(5000);
test.verify(true);
} finally {
test.shutdownCluster();
}
}
/**
* Move an open file into archival storage
*/
@Test
public void testMigrateOpenFileToArchival() throws Exception {
LOG.info("testMigrateOpenFileToArchival");
final Path fooDir = new Path("/foo");
Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
policyMap.put(fooDir, COLD);
NamespaceScheme nsScheme = new NamespaceScheme(Arrays.asList(fooDir), null,
BLOCK_SIZE, null, policyMap);
ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
test.setupCluster();
// create an open file
banner("writing to file /foo/bar");
final Path barFile = new Path(fooDir, "bar");
DFSTestUtil.createFile(test.dfs, barFile, BLOCK_SIZE, (short) 1, 0L);
FSDataOutputStream out = test.dfs.append(barFile);
out.writeBytes("hello, ");
((DFSOutputStream) out.getWrappedStream()).hsync();
try {
banner("start data migration");
test.setStoragePolicy(); // set /foo to COLD
test.migrate(ExitStatus.SUCCESS);
// make sure the under construction block has not been migrated
LocatedBlocks lbs = test.dfs.getClient().getLocatedBlocks(
barFile.toString(), BLOCK_SIZE);
LOG.info("Locations: " + lbs);
List<LocatedBlock> blks = lbs.getLocatedBlocks();
Assert.assertEquals(1, blks.size());
Assert.assertEquals(1, blks.get(0).getLocations().length);
banner("finish the migration, continue writing");
// make sure the writing can continue
out.writeBytes("world!");
((DFSOutputStream) out.getWrappedStream()).hsync();
IOUtils.cleanup(LOG, out);
lbs = test.dfs.getClient().getLocatedBlocks(
barFile.toString(), BLOCK_SIZE);
LOG.info("Locations: " + lbs);
blks = lbs.getLocatedBlocks();
Assert.assertEquals(1, blks.size());
Assert.assertEquals(1, blks.get(0).getLocations().length);
banner("finish writing, starting reading");
// check the content of /foo/bar
FSDataInputStream in = test.dfs.open(barFile);
byte[] buf = new byte[13];
// read from offset 1024
in.readFully(BLOCK_SIZE, buf, 0, buf.length);
IOUtils.cleanup(LOG, in);
Assert.assertEquals("hello, world!", new String(buf));
} finally {
test.shutdownCluster();
}
}
/**
* Test directories with Hot, Warm and Cold policies.
*/
@Test
public void testHotWarmColdDirs() throws Exception {
LOG.info("testHotWarmColdDirs");
PathPolicyMap pathPolicyMap = new PathPolicyMap(3);
NamespaceScheme nsScheme = pathPolicyMap.newNamespaceScheme();
ClusterScheme clusterScheme = new ClusterScheme();
MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
try {
test.runBasicTest(false);
pathPolicyMap.moveAround(test.dfs);
test.migrate(ExitStatus.SUCCESS);
test.verify(true);
} finally {
test.shutdownCluster();
}
}
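// Poll for up to ~5 seconds until the file's first block reports at least
// the expected number of replica locations; gives up silently after five tries.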
private void waitForAllReplicas(int expectedReplicaNum, Path file,
DistributedFileSystem dfs) throws Exception {
for (int i = 0; i < 5; i++) {
LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(file.toString(), 0,
BLOCK_SIZE);
LocatedBlock lb = lbs.get(0);
if (lb.getLocations().length >= expectedReplicaNum) {
return;
} else {
Thread.sleep(1000);
}
}
}
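// Mark every volume of the given storage type on the DataNode as full by
// setting its capacity to zero.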
private void setVolumeFull(DataNode dn, StorageType type) {
try (FsDatasetSpi.FsVolumeReferences refs = dn.getFSDataset()
.getFsVolumeReferences()) {
for (FsVolumeSpi fvs : refs) {
FsVolumeImpl volume = (FsVolumeImpl) fvs;
if (volume.getStorageType() == type) {
LOG.info("setCapacity to 0 for [" + volume.getStorageType() + "]"
+ volume.getStorageID());
volume.setCapacityForTesting(0);
}
}
} catch (IOException e) {
LOG.error("Unexpected exception by closing FsVolumeReference", e);
}
}
/**
* Test the case where DISK is running out of space.
*/
@Test
public void testNoSpaceDisk() throws Exception {
LOG.info("testNoSpaceDisk");
final PathPolicyMap pathPolicyMap = new PathPolicyMap(0);
final NamespaceScheme nsScheme = pathPolicyMap.newNamespaceScheme();
Configuration conf = new Configuration(DEFAULT_CONF);
final ClusterScheme clusterScheme = new ClusterScheme(conf,
NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
final MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
try {
test.runBasicTest(false);
// create 2 hot files with replication 3
final short replication = 3;
for (int i = 0; i < 2; i++) {
final Path p = new Path(pathPolicyMap.hot, "file" + i);
DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
waitForAllReplicas(replication, p, test.dfs);
}
// set all the DISK volume to full
for (DataNode dn : test.cluster.getDataNodes()) {
setVolumeFull(dn, StorageType.DISK);
DataNodeTestUtils.triggerHeartbeat(dn);
}
// test increasing replication. Since DISK is full,
// new replicas should be stored in ARCHIVE as a fallback storage.
final Path file0 = new Path(pathPolicyMap.hot, "file0");
final Replication r = test.getReplication(file0);
final short newReplication = (short) 5;
test.dfs.setReplication(file0, newReplication);
Thread.sleep(10000);
test.verifyReplication(file0, r.disk, newReplication - r.disk);
// test creating a cold file and then increase replication
final Path p = new Path(pathPolicyMap.cold, "foo");
DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
test.verifyReplication(p, 0, replication);
test.dfs.setReplication(p, newReplication);
Thread.sleep(10000);
test.verifyReplication(p, 0, newReplication);
// test moving a hot file to warm
final Path file1 = new Path(pathPolicyMap.hot, "file1");
test.dfs.rename(file1, pathPolicyMap.warm);
test.migrate(ExitStatus.NO_MOVE_BLOCK);
test.verifyFile(new Path(pathPolicyMap.warm, "file1"), WARM.getId());
} finally {
test.shutdownCluster();
}
}
/**
* Test the case where ARCHIVE is running out of space.
*/
@Test
public void testNoSpaceArchive() throws Exception {
LOG.info("testNoSpaceArchive");
final PathPolicyMap pathPolicyMap = new PathPolicyMap(0);
final NamespaceScheme nsScheme = pathPolicyMap.newNamespaceScheme();
final ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
final MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
try {
test.runBasicTest(false);
// create 2 cold files with replication 3
final short replication = 3;
for (int i = 0; i < 2; i++) {
final Path p = new Path(pathPolicyMap.cold, "file" + i);
DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
waitForAllReplicas(replication, p, test.dfs);
}
// set all the ARCHIVE volume to full
for (DataNode dn : test.cluster.getDataNodes()) {
setVolumeFull(dn, StorageType.ARCHIVE);
DataNodeTestUtils.triggerHeartbeat(dn);
}
{ // test increasing replication but new replicas cannot be created
// since there is no more ARCHIVE space.
final Path file0 = new Path(pathPolicyMap.cold, "file0");
final Replication r = test.getReplication(file0);
Assert.assertEquals(0, r.disk);
final short newReplication = (short) 5;
test.dfs.setReplication(file0, newReplication);
Thread.sleep(10000);
test.verifyReplication(file0, 0, r.archive);
}
{ // test creating a hot file
final Path p = new Path(pathPolicyMap.hot, "foo");
DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, (short) 3, 0L);
}
{ // test moving a cold file to warm
final Path file1 = new Path(pathPolicyMap.cold, "file1");
test.dfs.rename(file1, pathPolicyMap.warm);
test.migrate(ExitStatus.SUCCESS);
test.verify(true);
}
} finally {
test.shutdownCluster();
}
}
}
| 28,144 | 34.536616 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.mover;
import java.io.IOException;
import java.net.URI;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import com.google.common.collect.Maps;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.balancer.Dispatcher.DBlock;
import org.apache.hadoop.hdfs.server.balancer.ExitStatus;
import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
import org.apache.hadoop.hdfs.server.mover.Mover.MLocation;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Assert;
import org.junit.Test;
public class TestMover {
static final int DEFAULT_BLOCK_SIZE = 100;
static {
TestBalancer.initTestSetup();
}
static void initConf(Configuration conf) {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
}
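// Build a Mover for the single name service in the configuration, with no
// path restriction (a null path list makes all paths eligible).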
static Mover newMover(Configuration conf) throws IOException {
final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
Assert.assertEquals(1, namenodes.size());
Map<URI, List<Path>> nnMap = Maps.newHashMap();
for (URI nn : namenodes) {
nnMap.put(nn, null);
}
final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
nnMap, Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf,
NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
return new Mover(nncs.get(0), conf, new AtomicInteger(0));
}
@Test
public void testScheduleSameBlock() throws IOException {
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(4).build();
try {
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final String file = "/testScheduleSameBlock/file";
{
final FSDataOutputStream out = dfs.create(new Path(file));
out.writeChars("testScheduleSameBlock");
out.close();
}
final Mover mover = newMover(conf);
mover.init();
final Mover.Processor processor = mover.new Processor();
final LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
final List<MLocation> locations = MLocation.toLocations(lb);
final MLocation ml = locations.get(0);
final DBlock db = mover.newDBlock(lb.getBlock().getLocalBlock(), locations);
final List<StorageType> storageTypes = new ArrayList<StorageType>(
Arrays.asList(StorageType.DEFAULT, StorageType.DEFAULT));
Assert.assertTrue(processor.scheduleMoveReplica(db, ml, storageTypes));
Assert.assertFalse(processor.scheduleMoveReplica(db, ml, storageTypes));
} finally {
cluster.shutdown();
}
}
@Test
public void testScheduleBlockWithinSameNode() throws Exception {
final Configuration conf = new HdfsConfiguration();
initConf(conf);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3)
.storageTypes(
new StorageType[] { StorageType.DISK, StorageType.ARCHIVE })
.build();
try {
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final String file = "/testScheduleWithinSameNode/file";
Path dir = new Path("/testScheduleWithinSameNode");
dfs.mkdirs(dir);
// write to DISK
dfs.setStoragePolicy(dir, "HOT");
{
final FSDataOutputStream out = dfs.create(new Path(file));
out.writeChars("testScheduleWithinSameNode");
out.close();
}
// verify before movement
LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
StorageType[] storageTypes = lb.getStorageTypes();
for (StorageType storageType : storageTypes) {
Assert.assertTrue(StorageType.DISK == storageType);
}
// move to ARCHIVE
dfs.setStoragePolicy(dir, "COLD");
int rc = ToolRunner.run(conf, new Mover.Cli(),
new String[] { "-p", dir.toString() });
Assert.assertEquals("Movement to ARCHIVE should be successfull", 0, rc);
// Wait till the namenode has been notified
Thread.sleep(3000);
lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
storageTypes = lb.getStorageTypes();
for (StorageType storageType : storageTypes) {
Assert.assertTrue(StorageType.ARCHIVE == storageType);
}
} finally {
cluster.shutdown();
}
}
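// Assert that the computed move paths match exactly the expected paths.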
private void checkMovePaths(List<Path> actual, Path... expected) {
Assert.assertEquals(expected.length, actual.size());
for (Path p : expected) {
Assert.assertTrue(actual.contains(p));
}
}
/**
* Test Mover Cli by specifying a list of files/directories using option "-p".
* There is only one namenode (and hence name service) specified in the conf.
*/
@Test
public void testMoverCli() throws Exception {
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(new HdfsConfiguration()).numDataNodes(0).build();
try {
final Configuration conf = cluster.getConfiguration(0);
try {
Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "bar");
Assert.fail("Expected exception for illegal path bar");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("bar is not absolute", e);
}
Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf);
Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
Assert.assertEquals(1, namenodes.size());
Assert.assertEquals(1, movePaths.size());
URI nn = namenodes.iterator().next();
Assert.assertTrue(movePaths.containsKey(nn));
Assert.assertNull(movePaths.get(nn));
movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar");
namenodes = DFSUtil.getNsServiceRpcUris(conf);
Assert.assertEquals(1, movePaths.size());
nn = namenodes.iterator().next();
Assert.assertTrue(movePaths.containsKey(nn));
checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
} finally {
cluster.shutdown();
}
}
@Test
public void testMoverCliWithHAConf() throws Exception {
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(new HdfsConfiguration())
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(0).build();
HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
try {
Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
"-p", "/foo", "/bar");
Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
Assert.assertEquals(1, namenodes.size());
Assert.assertEquals(1, movePaths.size());
URI nn = namenodes.iterator().next();
Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
Assert.assertTrue(movePaths.containsKey(nn));
checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
} finally {
cluster.shutdown();
}
}
@Test
public void testMoverCliWithFederation() throws Exception {
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(new HdfsConfiguration())
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
.numDataNodes(0).build();
final Configuration conf = new HdfsConfiguration();
DFSTestUtil.setFederatedConfiguration(cluster, conf);
try {
Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
Assert.assertEquals(3, namenodes.size());
try {
Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo");
Assert.fail("Expect exception for missing authority information");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"does not contain scheme and authority", e);
}
try {
Mover.Cli.getNameNodePathsToMove(conf, "-p", "hdfs:///foo");
Assert.fail("Expect exception for missing authority information");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"does not contain scheme and authority", e);
}
try {
Mover.Cli.getNameNodePathsToMove(conf, "-p", "wrong-hdfs://ns1/foo");
Assert.fail("Expect exception for wrong scheme");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("Cannot resolve the path", e);
}
Iterator<URI> iter = namenodes.iterator();
URI nn1 = iter.next();
URI nn2 = iter.next();
Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
"-p", nn1 + "/foo", nn1 + "/bar", nn2 + "/foo/bar");
Assert.assertEquals(2, movePaths.size());
checkMovePaths(movePaths.get(nn1), new Path("/foo"), new Path("/bar"));
checkMovePaths(movePaths.get(nn2), new Path("/foo/bar"));
} finally {
cluster.shutdown();
}
}
@Test
public void testMoverCliWithFederationHA() throws Exception {
final MiniDFSCluster cluster = new MiniDFSCluster
.Builder(new HdfsConfiguration())
.nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(3))
.numDataNodes(0).build();
final Configuration conf = new HdfsConfiguration();
DFSTestUtil.setFederatedHAConfiguration(cluster, conf);
try {
Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
Assert.assertEquals(3, namenodes.size());
Iterator<URI> iter = namenodes.iterator();
URI nn1 = iter.next();
URI nn2 = iter.next();
URI nn3 = iter.next();
Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
"-p", nn1 + "/foo", nn1 + "/bar", nn2 + "/foo/bar", nn3 + "/foobar");
Assert.assertEquals(3, movePaths.size());
checkMovePaths(movePaths.get(nn1), new Path("/foo"), new Path("/bar"));
checkMovePaths(movePaths.get(nn2), new Path("/foo/bar"));
checkMovePaths(movePaths.get(nn3), new Path("/foobar"));
} finally {
cluster.shutdown();
}
}
@Test(timeout = 300000)
public void testTwoReplicaSameStorageTypeShouldNotSelect() throws Exception {
// HDFS-8147
final Configuration conf = new HdfsConfiguration();
initConf(conf);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3)
.storageTypes(
new StorageType[][] { { StorageType.DISK, StorageType.ARCHIVE },
{ StorageType.DISK, StorageType.DISK },
{ StorageType.DISK, StorageType.ARCHIVE } }).build();
try {
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final String file = "/testForTwoReplicaSameStorageTypeShouldNotSelect";
// write to DISK
final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
out.writeChars("testForTwoReplicaSameStorageTypeShouldNotSelect");
out.close();
// verify before movement
LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
StorageType[] storageTypes = lb.getStorageTypes();
for (StorageType storageType : storageTypes) {
Assert.assertTrue(StorageType.DISK == storageType);
}
// move to ARCHIVE
dfs.setStoragePolicy(new Path(file), "COLD");
int rc = ToolRunner.run(conf, new Mover.Cli(),
new String[] { "-p", file.toString() });
Assert.assertEquals("Movement to ARCHIVE should be successfull", 0, rc);
// Wait till the namenode has been notified
Thread.sleep(3000);
lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
storageTypes = lb.getStorageTypes();
int archiveCount = 0;
for (StorageType storageType : storageTypes) {
if (StorageType.ARCHIVE == storageType) {
archiveCount++;
}
}
Assert.assertEquals(2, archiveCount);
} finally {
cluster.shutdown();
}
}
@Test(timeout = 300000)
public void testMoveWhenStoragePolicyNotSatisfying() throws Exception {
// HDFS-8147
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3)
.storageTypes(
new StorageType[][] { { StorageType.DISK }, { StorageType.DISK },
{ StorageType.DISK } }).build();
try {
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final String file = "/testMoveWhenStoragePolicyNotSatisfying";
// write to DISK
final FSDataOutputStream out = dfs.create(new Path(file));
out.writeChars("testMoveWhenStoragePolicyNotSatisfying");
out.close();
// move to ARCHIVE
dfs.setStoragePolicy(new Path(file), "COLD");
int rc = ToolRunner.run(conf, new Mover.Cli(),
new String[] { "-p", file.toString() });
int exitcode = ExitStatus.NO_MOVE_BLOCK.getExitCode();
Assert.assertEquals("Exit code should be " + exitcode, exitcode, rc);
} finally {
cluster.shutdown();
}
}
@Test
public void testMoverFailedRetry() throws Exception {
// HDFS-8147
final Configuration conf = new HdfsConfiguration();
initConf(conf);
conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3)
.storageTypes(
new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE},
{StorageType.DISK, StorageType.ARCHIVE},
{StorageType.DISK, StorageType.ARCHIVE}}).build();
try {
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final String file = "/testMoverFailedRetry";
// write to DISK
final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
out.writeChars("testMoverFailedRetry");
out.close();
// Delete the block file so that the block move fails with FileNotFoundException
LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
// move to ARCHIVE
dfs.setStoragePolicy(new Path(file), "COLD");
int rc = ToolRunner.run(conf, new Mover.Cli(),
new String[] {"-p", file.toString()});
Assert.assertEquals("Movement should fail after some retry",
ExitStatus.NO_MOVE_PROGRESS.getExitCode(), rc);
} finally {
cluster.shutdown();
}
}
}
| 16,296 | 38.460048 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestGetUriFromString.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.common;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.io.IOException;
import java.net.URI;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Test;
/**
* This is a unit test, which tests {@link Util#stringAsURI(String)}
* for Windows and Unix style file paths.
*/
public class TestGetUriFromString {
private static final Log LOG = LogFactory.getLog(TestGetUriFromString.class);
private static final String RELATIVE_FILE_PATH = "relativeFilePath";
private static final String ABSOLUTE_PATH_UNIX = "/tmp/file1";
private static final String ABSOLUTE_PATH_WINDOWS =
"C:\\Documents and Settings\\All Users";
private static final String URI_FILE_SCHEMA = "file";
private static final String URI_PATH_UNIX = "/var/www";
private static final String URI_PATH_WINDOWS =
"/C:/Documents%20and%20Settings/All%20Users";
private static final String URI_UNIX = URI_FILE_SCHEMA + "://"
+ URI_PATH_UNIX;
private static final String URI_WINDOWS = URI_FILE_SCHEMA + "://"
+ URI_PATH_WINDOWS;
/**
* Test for a relative path, OS independent.
* @throws IOException
*/
@Test
public void testRelativePathAsURI() throws IOException {
URI u = Util.stringAsURI(RELATIVE_FILE_PATH);
LOG.info("Uri: " + u);
assertNotNull(u);
}
/**
* Test for OS dependent absolute paths.
* @throws IOException
*/
@Test
public void testAbsolutePathAsURI() throws IOException {
URI u = null;
u = Util.stringAsURI(ABSOLUTE_PATH_WINDOWS);
assertNotNull(
"Uri should not be null for Windows path" + ABSOLUTE_PATH_WINDOWS, u);
assertEquals(URI_FILE_SCHEMA, u.getScheme());
u = Util.stringAsURI(ABSOLUTE_PATH_UNIX);
assertNotNull("Uri should not be null for Unix path" + ABSOLUTE_PATH_UNIX, u);
assertEquals(URI_FILE_SCHEMA, u.getScheme());
}
/**
* Test for a URI
* @throws IOException
*/
@Test
public void testURI() throws IOException {
LOG.info("Testing correct Unix URI: " + URI_UNIX);
URI u = Util.stringAsURI(URI_UNIX);
LOG.info("Uri: " + u);
assertNotNull("Uri should not be null at this point", u);
assertEquals(URI_FILE_SCHEMA, u.getScheme());
assertEquals(URI_PATH_UNIX, u.getPath());
LOG.info("Testing correct windows URI: " + URI_WINDOWS);
u = Util.stringAsURI(URI_WINDOWS);
LOG.info("Uri: " + u);
assertNotNull("Uri should not be null at this point", u);
assertEquals(URI_FILE_SCHEMA, u.getScheme());
assertEquals(URI_PATH_WINDOWS.replace("%20", " "), u.getPath());
}
}
| 3,487 | 34.591837 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.common;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
import org.apache.hadoop.hdfs.web.resources.DoAsParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
import org.apache.hadoop.security.authorize.ProxyServers;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.junit.BeforeClass;
import org.junit.Test;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.junit.Assert;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestJspHelper {
private final Configuration conf = new HdfsConfiguration();
// allow user with TGT to run tests
@BeforeClass
public static void setupKerb() {
System.setProperty("java.security.krb5.kdc", "");
System.setProperty("java.security.krb5.realm", "NONE");
}
public static class DummySecretManager extends
AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
public DummySecretManager(long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime, long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval) {
super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
}
@Override
public DelegationTokenIdentifier createIdentifier() {
return null;
}
@Override
public byte[] createPassword(DelegationTokenIdentifier dtId) {
return new byte[1];
}
}
@Test
public void testGetUgi() throws IOException {
conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
HttpServletRequest request = mock(HttpServletRequest.class);
ServletContext context = mock(ServletContext.class);
String user = "TheDoctor";
Text userText = new Text(user);
DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(userText,
userText, null);
Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
dtId, new DummySecretManager(0, 0, 0, 0));
String tokenString = token.encodeToUrlString();
when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
tokenString);
when(request.getRemoteUser()).thenReturn(user);
//Test attribute in the url to be used as service in the token.
when(request.getParameter(JspHelper.NAMENODE_ADDRESS)).thenReturn(
"1.1.1.1:1111");
conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
verifyServiceInToken(context, request, "1.1.1.1:1111");
//Test attribute name.node.address
//Set the nnaddr url parameter to null.
when(request.getParameter(JspHelper.NAMENODE_ADDRESS)).thenReturn(null);
InetSocketAddress addr = new InetSocketAddress("localhost", 2222);
when(context.getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY))
.thenReturn(addr);
verifyServiceInToken(context, request, addr.getAddress().getHostAddress()
+ ":2222");
//Test service already set in the token
token.setService(new Text("3.3.3.3:3333"));
tokenString = token.encodeToUrlString();
//Set the name.node.address attribute in Servlet context to null
when(context.getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY))
.thenReturn(null);
when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
tokenString);
verifyServiceInToken(context, request, "3.3.3.3:3333");
}
private void verifyServiceInToken(ServletContext context,
HttpServletRequest request, String expected) throws IOException {
UserGroupInformation ugi = JspHelper.getUGI(context, request, conf);
Token<? extends TokenIdentifier> tokenInUgi = ugi.getTokens().iterator()
.next();
Assert.assertEquals(expected, tokenInUgi.getService().toString());
}
@Test
public void testGetUgiFromToken() throws IOException {
conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
ServletContext context = mock(ServletContext.class);
String realUser = "TheDoctor";
String user = "TheNurse";
conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
UserGroupInformation ugi;
HttpServletRequest request;
Text ownerText = new Text(user);
DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(
ownerText, ownerText, new Text(realUser));
Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
dtId, new DummySecretManager(0, 0, 0, 0));
String tokenString = token.encodeToUrlString();
// token with no auth-ed user
request = getMockRequest(null, null, null);
when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
tokenString);
ugi = JspHelper.getUGI(context, request, conf);
Assert.assertNotNull(ugi.getRealUser());
Assert.assertEquals(realUser, ugi.getRealUser().getShortUserName());
Assert.assertEquals(user, ugi.getShortUserName());
checkUgiFromToken(ugi);
// token with auth-ed user
request = getMockRequest(realUser, null, null);
when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
tokenString);
ugi = JspHelper.getUGI(context, request, conf);
Assert.assertNotNull(ugi.getRealUser());
Assert.assertEquals(realUser, ugi.getRealUser().getShortUserName());
Assert.assertEquals(user, ugi.getShortUserName());
checkUgiFromToken(ugi);
// completely different user, token trumps auth
request = getMockRequest("rogue", null, null);
when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
tokenString);
ugi = JspHelper.getUGI(context, request, conf);
Assert.assertNotNull(ugi.getRealUser());
Assert.assertEquals(realUser, ugi.getRealUser().getShortUserName());
Assert.assertEquals(user, ugi.getShortUserName());
checkUgiFromToken(ugi);
// expected case
request = getMockRequest(null, user, null);
when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
tokenString);
ugi = JspHelper.getUGI(context, request, conf);
Assert.assertNotNull(ugi.getRealUser());
Assert.assertEquals(realUser, ugi.getRealUser().getShortUserName());
Assert.assertEquals(user, ugi.getShortUserName());
checkUgiFromToken(ugi);
// can't proxy with a token!
request = getMockRequest(null, null, "rogue");
when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
tokenString);
try {
JspHelper.getUGI(context, request, conf);
Assert.fail("bad request allowed");
} catch (IOException ioe) {
Assert.assertEquals(
"Usernames not matched: name=rogue != expected="+user,
ioe.getMessage());
}
// can't proxy with a token!
request = getMockRequest(null, user, "rogue");
when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(
tokenString);
try {
JspHelper.getUGI(context, request, conf);
Assert.fail("bad request allowed");
} catch (IOException ioe) {
Assert.assertEquals(
"Usernames not matched: name=rogue != expected="+user,
ioe.getMessage());
}
}
@Test
public void testGetNonProxyUgi() throws IOException {
conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
ServletContext context = mock(ServletContext.class);
String realUser = "TheDoctor";
String user = "TheNurse";
conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
UserGroupInformation ugi;
HttpServletRequest request;
// have to be auth-ed with remote user
request = getMockRequest(null, null, null);
try {
JspHelper.getUGI(context, request, conf);
Assert.fail("bad request allowed");
} catch (IOException ioe) {
Assert.assertEquals(
"Security enabled but user not authenticated by filter",
ioe.getMessage());
}
request = getMockRequest(null, realUser, null);
try {
JspHelper.getUGI(context, request, conf);
Assert.fail("bad request allowed");
} catch (IOException ioe) {
Assert.assertEquals(
"Security enabled but user not authenticated by filter",
ioe.getMessage());
}
// ugi for remote user
request = getMockRequest(realUser, null, null);
ugi = JspHelper.getUGI(context, request, conf);
Assert.assertNull(ugi.getRealUser());
Assert.assertEquals(realUser, ugi.getShortUserName());
checkUgiFromAuth(ugi);
// ugi for remote user = real user
request = getMockRequest(realUser, realUser, null);
ugi = JspHelper.getUGI(context, request, conf);
Assert.assertNull(ugi.getRealUser());
Assert.assertEquals(realUser, ugi.getShortUserName());
checkUgiFromAuth(ugi);
// ugi for remote user != real user
request = getMockRequest(realUser, user, null);
try {
JspHelper.getUGI(context, request, conf);
Assert.fail("bad request allowed");
} catch (IOException ioe) {
Assert.assertEquals(
"Usernames not matched: name="+user+" != expected="+realUser,
ioe.getMessage());
}
}
@Test
public void testGetProxyUgi() throws IOException {
conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
ServletContext context = mock(ServletContext.class);
String realUser = "TheDoctor";
String user = "TheNurse";
conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
conf.set(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(realUser), "*");
conf.set(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(realUser), "*");
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
UserGroupInformation.setConfiguration(conf);
UserGroupInformation ugi;
HttpServletRequest request;
// have to be auth-ed with remote user
request = getMockRequest(null, null, user);
try {
JspHelper.getUGI(context, request, conf);
Assert.fail("bad request allowed");
} catch (IOException ioe) {
Assert.assertEquals(
"Security enabled but user not authenticated by filter",
ioe.getMessage());
}
request = getMockRequest(null, realUser, user);
try {
JspHelper.getUGI(context, request, conf);
Assert.fail("bad request allowed");
} catch (IOException ioe) {
Assert.assertEquals(
"Security enabled but user not authenticated by filter",
ioe.getMessage());
}
// proxy ugi for user via remote user
request = getMockRequest(realUser, null, user);
ugi = JspHelper.getUGI(context, request, conf);
Assert.assertNotNull(ugi.getRealUser());
Assert.assertEquals(realUser, ugi.getRealUser().getShortUserName());
Assert.assertEquals(user, ugi.getShortUserName());
checkUgiFromAuth(ugi);
// proxy ugi for user via remote user = real user
request = getMockRequest(realUser, realUser, user);
ugi = JspHelper.getUGI(context, request, conf);
Assert.assertNotNull(ugi.getRealUser());
Assert.assertEquals(realUser, ugi.getRealUser().getShortUserName());
Assert.assertEquals(user, ugi.getShortUserName());
checkUgiFromAuth(ugi);
// proxy ugi for user via remote user != real user
request = getMockRequest(realUser, user, user);
try {
JspHelper.getUGI(context, request, conf);
Assert.fail("bad request allowed");
} catch (IOException ioe) {
Assert.assertEquals(
"Usernames not matched: name="+user+" != expected="+realUser,
ioe.getMessage());
}
// try to get a proxy user with an unauthorized user
try {
request = getMockRequest(user, null, realUser);
JspHelper.getUGI(context, request, conf);
Assert.fail("bad proxy request allowed");
} catch (AuthorizationException ae) {
Assert.assertEquals(
"User: " + user + " is not allowed to impersonate " + realUser,
ae.getMessage());
}
try {
request = getMockRequest(user, user, realUser);
JspHelper.getUGI(context, request, conf);
Assert.fail("bad proxy request allowed");
} catch (AuthorizationException ae) {
Assert.assertEquals(
"User: " + user + " is not allowed to impersonate " + realUser,
ae.getMessage());
}
}
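// Mock a request: remoteUser is the filter-authenticated user, while user
// and doAs become the corresponding query parameters.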
private HttpServletRequest getMockRequest(String remoteUser, String user, String doAs) {
HttpServletRequest request = mock(HttpServletRequest.class);
when(request.getParameter(UserParam.NAME)).thenReturn(user);
if (doAs != null) {
when(request.getParameter(DoAsParam.NAME)).thenReturn(doAs);
}
when(request.getRemoteUser()).thenReturn(remoteUser);
return request;
}
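// A filter-authenticated UGI should use KERBEROS_SSL, or PROXY wrapping a
// KERBEROS_SSL real user when impersonation is involved.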
private void checkUgiFromAuth(UserGroupInformation ugi) {
if (ugi.getRealUser() != null) {
Assert.assertEquals(AuthenticationMethod.PROXY,
ugi.getAuthenticationMethod());
Assert.assertEquals(AuthenticationMethod.KERBEROS_SSL,
ugi.getRealUser().getAuthenticationMethod());
} else {
Assert.assertEquals(AuthenticationMethod.KERBEROS_SSL,
ugi.getAuthenticationMethod());
}
}
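// A token-authenticated UGI should use TOKEN, or PROXY wrapping a TOKEN
// real user when impersonation is involved.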
private void checkUgiFromToken(UserGroupInformation ugi) {
if (ugi.getRealUser() != null) {
Assert.assertEquals(AuthenticationMethod.PROXY,
ugi.getAuthenticationMethod());
Assert.assertEquals(AuthenticationMethod.TOKEN,
ugi.getRealUser().getAuthenticationMethod());
} else {
Assert.assertEquals(AuthenticationMethod.TOKEN,
ugi.getAuthenticationMethod());
}
}
@Test
public void testReadWriteReplicaState() {
try {
DataOutputBuffer out = new DataOutputBuffer();
DataInputBuffer in = new DataInputBuffer();
for (HdfsServerConstants.ReplicaState repState : HdfsServerConstants.ReplicaState
.values()) {
repState.write(out);
in.reset(out.getData(), out.getLength());
HdfsServerConstants.ReplicaState result = HdfsServerConstants.ReplicaState
.read(in);
assertTrue("testReadWrite error !!!", repState == result);
out.reset();
in.reset();
}
} catch (Exception ex) {
fail("testReadWrite ex error ReplicaState");
}
}
private static String clientAddr = "1.1.1.1";
private static String chainedClientAddr = clientAddr+", 2.2.2.2";
private static String proxyAddr = "3.3.3.3";
@Test
public void testRemoteAddr() {
assertEquals(clientAddr, getRemoteAddr(clientAddr, null, false));
}
@Test
public void testRemoteAddrWithUntrustedProxy() {
assertEquals(proxyAddr, getRemoteAddr(clientAddr, proxyAddr, false));
}
@Test
public void testRemoteAddrWithTrustedProxy() {
assertEquals(clientAddr, getRemoteAddr(clientAddr, proxyAddr, true));
assertEquals(clientAddr, getRemoteAddr(chainedClientAddr, proxyAddr, true));
}
@Test
public void testRemoteAddrWithTrustedProxyAndEmptyClient() {
assertEquals(proxyAddr, getRemoteAddr(null, proxyAddr, true));
assertEquals(proxyAddr, getRemoteAddr("", proxyAddr, true));
}
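// Simulate a request that may arrive through a proxy: the original client
// address is carried in X-Forwarded-For and honored only when the proxy is
// registered as trusted.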
private String getRemoteAddr(String clientAddr, String proxyAddr, boolean trusted) {
HttpServletRequest req = mock(HttpServletRequest.class);
when(req.getRemoteAddr()).thenReturn("1.2.3.4");
Configuration conf = new Configuration();
if (proxyAddr == null) {
when(req.getRemoteAddr()).thenReturn(clientAddr);
} else {
when(req.getRemoteAddr()).thenReturn(proxyAddr);
when(req.getHeader("X-Forwarded-For")).thenReturn(clientAddr);
if (trusted) {
conf.set(ProxyServers.CONF_HADOOP_PROXYSERVERS, proxyAddr);
}
}
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
return JspHelper.getRemoteAddr(req);
}
}
| 18,026 | 37.851293 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/StorageAdapter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.common;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.mockito.Mockito;
/**
* Test methods that need to access package-private parts of
* Storage
*/
public abstract class StorageAdapter {
/**
* Inject and return a spy on a storage directory
*/
public static StorageDirectory spyOnStorageDirectory(
Storage s, int idx) {
StorageDirectory dir = Mockito.spy(s.getStorageDir(idx));
s.storageDirs.set(idx, dir);
return dir;
}
}
| 1,339 | 32.5 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.timeout;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.junit.Test;
import org.mockito.Mockito;
/**
* Test manually requesting that the DataNode send a block report.
*/
public final class TestTriggerBlockReport {
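// Shared body for both tests: trigger a block report manually and verify
// that exactly the requested kind (incremental or full) is sent.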
private void testTriggerBlockReport(boolean incremental) throws Exception {
Configuration conf = new HdfsConfiguration();
// Set a really long value for dfs.blockreport.intervalMsec and
// dfs.heartbeat.interval, so that incremental block reports and heartbeats
// won't be sent during this test unless they're triggered
// manually.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10800000L);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1080L);
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
DatanodeProtocolClientSideTranslatorPB spy =
DataNodeTestUtils.spyOnBposToNN(
cluster.getDataNodes().get(0), cluster.getNameNode());
DFSTestUtil.createFile(fs, new Path("/abc"), 16, (short) 1, 1L);
// We should get 1 incremental block report.
Mockito.verify(spy, timeout(60000).times(1)).blockReceivedAndDeleted(
any(DatanodeRegistration.class),
anyString(),
any(StorageReceivedDeletedBlocks[].class));
// We should not receive any more full or incremental block reports,
// since the interval we configured is so long.
for (int i = 0; i < 3; i++) {
Thread.sleep(10);
Mockito.verify(spy, times(0)).blockReport(
any(DatanodeRegistration.class),
anyString(),
any(StorageBlockReport[].class),
Mockito.<BlockReportContext>anyObject());
Mockito.verify(spy, times(1)).blockReceivedAndDeleted(
any(DatanodeRegistration.class),
anyString(),
any(StorageReceivedDeletedBlocks[].class));
}
// Create a fake block deletion notification on the DataNode.
// This will be sent with the next incremental block report.
ReceivedDeletedBlockInfo rdbi = new ReceivedDeletedBlockInfo(
new Block(5678, 512, 1000), BlockStatus.DELETED_BLOCK, null);
DataNode datanode = cluster.getDataNodes().get(0);
BPServiceActor actor =
datanode.getAllBpOs().get(0).getBPServiceActors().get(0);
String storageUuid;
try (FsDatasetSpi.FsVolumeReferences volumes =
datanode.getFSDataset().getFsVolumeReferences()) {
storageUuid = volumes.get(0).getStorageID();
}
actor.notifyNamenodeDeletedBlock(rdbi, storageUuid);
// Manually trigger a block report.
datanode.triggerBlockReport(
new BlockReportOptions.Factory().
setIncremental(incremental).
build()
);
// triggerBlockReport returns before the block report is
// actually sent. Wait for it to be sent here.
if (incremental) {
Mockito.verify(spy, timeout(60000).times(2)).
blockReceivedAndDeleted(
any(DatanodeRegistration.class),
anyString(),
any(StorageReceivedDeletedBlocks[].class));
} else {
Mockito.verify(spy, timeout(60000)).blockReport(
any(DatanodeRegistration.class),
anyString(),
any(StorageBlockReport[].class),
Mockito.<BlockReportContext>anyObject());
}
cluster.shutdown();
}
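  // Hedged aside (not part of the original test): the same trigger is exposed
  // operationally through dfsadmin, assuming a standard deployment:
  //   hdfs dfsadmin -triggerBlockReport [-incremental] <datanode-host:ipc-port>
  // which builds the same request object used above, e.g.:
  //   BlockReportOptions options =
  //       new BlockReportOptions.Factory().setIncremental(true).build();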
@Test
public void testTriggerFullBlockReport() throws Exception {
testTriggerBlockReport(false);
}
@Test
public void testTriggerIncrementalBlockReport() throws Exception {
testTriggerBlockReport(true);
}
}
| 5,610 | 39.366906 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Time;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Test that datanodes can correctly handle errors during block read/write.
*/
public class TestDiskError {
private FileSystem fs;
private MiniDFSCluster cluster;
private Configuration conf;
@Before
public void setUp() throws Exception {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
}
@After
public void tearDown() throws Exception {
cluster.shutdown();
}
/**
* Test to check that a DN goes down when all its volumes have failed.
*/
@Test
public void testShutdown() throws Exception {
if (System.getProperty("os.name").startsWith("Windows")) {
/**
* This test depends on OS not allowing file creations on a directory
* that does not have write permissions for the user. Apparently it is
* not the case on Windows (at least under Cygwin), and possibly AIX.
* This is disabled on Windows.
*/
return;
}
// Bring up two more datanodes
cluster.startDataNodes(conf, 2, true, null, null);
cluster.waitActive();
final int dnIndex = 0;
String bpid = cluster.getNamesystem().getBlockPoolId();
File storageDir = cluster.getInstanceStorageDir(dnIndex, 0);
File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
storageDir = cluster.getInstanceStorageDir(dnIndex, 1);
File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
try {
      // make the data directories of the first datanode read-only
assertTrue("Couldn't chmod local vol", dir1.setReadOnly());
assertTrue("Couldn't chmod local vol", dir2.setReadOnly());
// create files and make sure that first datanode will be down
DataNode dn = cluster.getDataNodes().get(dnIndex);
for (int i=0; dn.isDatanodeUp(); i++) {
Path fileName = new Path("/test.txt"+i);
DFSTestUtil.createFile(fs, fileName, 1024, (short)2, 1L);
DFSTestUtil.waitReplication(fs, fileName, (short)2);
fs.delete(fileName, true);
}
} finally {
// restore its old permission
FileUtil.setWritable(dir1, true);
FileUtil.setWritable(dir2, true);
}
}
/**
* Test that when there is a failure replicating a block the temporary
* and meta files are cleaned up and subsequent replication succeeds.
*/
@Test
public void testReplicationError() throws Exception {
// create a file of replication factor of 1
final Path fileName = new Path("/test.txt");
final int fileLen = 1;
DFSTestUtil.createFile(fs, fileName, 1, (short)1, 1L);
DFSTestUtil.waitReplication(fs, fileName, (short)1);
    // get the block belonging to the created file
LocatedBlocks blocks = NameNodeAdapter.getBlockLocations(
cluster.getNameNode(), fileName.toString(), 0, (long)fileLen);
assertEquals("Should only find 1 block", blocks.locatedBlockCount(), 1);
LocatedBlock block = blocks.get(0);
// bring up a second datanode
cluster.startDataNodes(conf, 1, true, null, null);
cluster.waitActive();
final int sndNode = 1;
DataNode datanode = cluster.getDataNodes().get(sndNode);
// replicate the block to the second datanode
InetSocketAddress target = datanode.getXferAddress();
Socket s = new Socket(target.getAddress(), target.getPort());
// write the header.
DataOutputStream out = new DataOutputStream(s.getOutputStream());
DataChecksum checksum = DataChecksum.newDataChecksum(
DataChecksum.Type.CRC32, 512);
new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT,
BlockTokenSecretManager.DUMMY_TOKEN, "",
new DatanodeInfo[0], new StorageType[0], null,
BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L,
checksum, CachingStrategy.newDefaultStrategy(), false, false, null);
out.flush();
// close the connection before sending the content of the block
out.close();
// the temporary block & meta files should be deleted
String bpid = cluster.getNamesystem().getBlockPoolId();
File storageDir = cluster.getInstanceStorageDir(sndNode, 0);
File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
storageDir = cluster.getInstanceStorageDir(sndNode, 1);
File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
Thread.sleep(100);
}
// then increase the file's replication factor
fs.setReplication(fileName, (short)2);
// replication should succeed
    DFSTestUtil.waitReplication(fs, fileName, (short)2);
// clean up the file
fs.delete(fileName, false);
}
/**
* Check that the permissions of the local DN directories are as expected.
*/
@Test
public void testLocalDirs() throws Exception {
Configuration conf = new Configuration();
final String permStr = conf.get(
DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY);
FsPermission expected = new FsPermission(permStr);
// Check permissions on directories in 'dfs.datanode.data.dir'
FileSystem localFS = FileSystem.getLocal(conf);
for (DataNode dn : cluster.getDataNodes()) {
try (FsDatasetSpi.FsVolumeReferences volumes =
dn.getFSDataset().getFsVolumeReferences()) {
for (FsVolumeSpi vol : volumes) {
String dir = vol.getBasePath();
Path dataDir = new Path(dir);
FsPermission actual = localFS.getFileStatus(dataDir).getPermission();
assertEquals("Permission for dir: " + dataDir + ", is " + actual +
", while expected is " + expected, expected, actual);
}
}
}
}
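  // Hedged aside: the expected permission checked above comes from
  // dfs.datanode.data.dir.perm (DFS_DATANODE_DATA_DIR_PERMISSION_KEY),
  // which defaults to "700". A deployment wanting group read access could,
  // for example, set:
  //   conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "750");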
  /**
   * Checks that {@link DataNode#checkDiskErrorAsync()} is being called;
   * before the code was refactored, this method was not invoked at all.
   * @throws IOException
   * @throws InterruptedException
   */
@Test
public void testcheckDiskError() throws IOException, InterruptedException {
    if (cluster.getDataNodes().size() <= 0) {
cluster.startDataNodes(conf, 1, true, null, null);
cluster.waitActive();
}
DataNode dataNode = cluster.getDataNodes().get(0);
    long slackTime = dataNode.checkDiskErrorInterval / 2;
    // check for a disk error
    dataNode.checkDiskErrorAsync();
    Thread.sleep(dataNode.checkDiskErrorInterval);
    long lastDiskErrorCheck = dataNode.getLastDiskErrorCheck();
    assertTrue("Disk Error check is not performed within "
            + dataNode.checkDiskErrorInterval + " ms",
        (Time.monotonicNow() - lastDiskErrorCheck)
            < (dataNode.checkDiskErrorInterval + slackTime));
}
}
| 8,939 | 38.38326 | 197 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.SequentialBlockIdGenerator;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
import org.apache.hadoop.util.DataChecksum;
import org.junit.Before;
import org.junit.Test;
/**
* this class tests the methods of the SimulatedFSDataset.
*/
public class TestSimulatedFSDataset {
Configuration conf = null;
static final String bpid = "BP-TEST";
static final int NUMBLOCKS = 20;
static final int BLOCK_LENGTH_MULTIPLIER = 79;
static final long FIRST_BLK_ID = 1;
@Before
public void setUp() throws Exception {
conf = new HdfsConfiguration();
SimulatedFSDataset.setFactory(conf);
}
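  // Hedged note: SimulatedFSDataset.setFactory(conf) is expected to point the
  // dataset factory key at the simulated implementation, roughly:
  //   conf.set(DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
  //       SimulatedFSDataset.Factory.class.getName());
  // which is what testFSDatasetFactory() below verifies.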
static long blockIdToLen(long blkid) {
return blkid * BLOCK_LENGTH_MULTIPLIER;
}
static int addSomeBlocks(SimulatedFSDataset fsdataset) throws IOException {
return addSomeBlocks(fsdataset, false);
}
static int addSomeBlocks(SimulatedFSDataset fsdataset,
boolean negativeBlkID) throws IOException {
return addSomeBlocks(fsdataset, FIRST_BLK_ID, negativeBlkID);
}
static int addSomeBlocks(SimulatedFSDataset fsdataset, long startingBlockId,
boolean negativeBlkID) throws IOException {
int bytesAdded = 0;
for (long i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
long blkID = negativeBlkID ? i * -1 : i;
ExtendedBlock b = new ExtendedBlock(bpid, blkID, 0, 0);
// we pass expected len as zero, - fsdataset should use the sizeof actual
// data written
ReplicaInPipelineInterface bInfo = fsdataset.createRbw(
StorageType.DEFAULT, b, false).getReplica();
ReplicaOutputStreams out = bInfo.createStreams(true,
DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
try {
OutputStream dataOut = out.getDataOut();
assertEquals(0, fsdataset.getLength(b));
for (int j=1; j <= blockIdToLen(i); ++j) {
dataOut.write(j);
assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
bytesAdded++;
}
} finally {
out.close();
}
b.setNumBytes(blockIdToLen(i));
fsdataset.finalizeBlock(b);
assertEquals(blockIdToLen(i), fsdataset.getLength(b));
}
return bytesAdded;
}
static void readSomeBlocks(SimulatedFSDataset fsdataset,
boolean negativeBlkID) throws IOException {
for (long i = FIRST_BLK_ID; i <= NUMBLOCKS; ++i) {
long blkID = negativeBlkID ? i * -1 : i;
ExtendedBlock b = new ExtendedBlock(bpid, blkID, 0, 0);
assertTrue(fsdataset.isValidBlock(b));
assertEquals(blockIdToLen(i), fsdataset.getLength(b));
checkBlockDataAndSize(fsdataset, b, blockIdToLen(i));
}
}
@Test
public void testFSDatasetFactory() {
final Configuration conf = new Configuration();
FsDatasetSpi.Factory<?> f = FsDatasetSpi.Factory.getFactory(conf);
assertEquals(FsDatasetFactory.class, f.getClass());
assertFalse(f.isSimulated());
SimulatedFSDataset.setFactory(conf);
FsDatasetSpi.Factory<?> s = FsDatasetSpi.Factory.getFactory(conf);
assertEquals(SimulatedFSDataset.Factory.class, s.getClass());
assertTrue(s.isSimulated());
}
@Test
public void testGetMetaData() throws IOException {
final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
ExtendedBlock b = new ExtendedBlock(bpid, FIRST_BLK_ID, 5, 0);
try {
assertTrue(fsdataset.getMetaDataInputStream(b) == null);
assertTrue("Expected an IO exception", false);
} catch (IOException e) {
// ok - as expected
}
addSomeBlocks(fsdataset); // Only need to add one but ....
b = new ExtendedBlock(bpid, FIRST_BLK_ID, 0, 0);
InputStream metaInput = fsdataset.getMetaDataInputStream(b);
DataInputStream metaDataInput = new DataInputStream(metaInput);
short version = metaDataInput.readShort();
assertEquals(BlockMetadataHeader.VERSION, version);
DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
assertEquals(DataChecksum.Type.NULL, checksum.getChecksumType());
assertEquals(0, checksum.getChecksumSize());
}
@Test
public void testStorageUsage() throws IOException {
final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
    assertEquals(0, fsdataset.getDfsUsed());
assertEquals(fsdataset.getRemaining(), fsdataset.getCapacity());
int bytesAdded = addSomeBlocks(fsdataset);
assertEquals(bytesAdded, fsdataset.getDfsUsed());
assertEquals(fsdataset.getCapacity()-bytesAdded, fsdataset.getRemaining());
}
static void checkBlockDataAndSize(SimulatedFSDataset fsdataset,
ExtendedBlock b, long expectedLen) throws IOException {
InputStream input = fsdataset.getBlockInputStream(b);
long lengthRead = 0;
int data;
while ((data = input.read()) != -1) {
assertEquals(SimulatedFSDataset.simulatedByte(b.getLocalBlock(),
lengthRead), (byte) (data & SimulatedFSDataset.BYTE_MASK));
lengthRead++;
}
assertEquals(expectedLen, lengthRead);
}
@Test
public void testWriteRead() throws IOException {
testWriteRead(false);
testWriteRead(true);
}
private void testWriteRead(boolean negativeBlkID) throws IOException {
final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
addSomeBlocks(fsdataset, negativeBlkID);
readSomeBlocks(fsdataset, negativeBlkID);
}
@Test
public void testGetBlockReport() throws IOException {
SimulatedFSDataset fsdataset = getSimulatedFSDataset();
BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
assertEquals(0, blockReport.getNumberOfBlocks());
addSomeBlocks(fsdataset);
blockReport = fsdataset.getBlockReport(bpid);
assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
}
}
@Test
public void testInjectionEmpty() throws IOException {
SimulatedFSDataset fsdataset = getSimulatedFSDataset();
BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
assertEquals(0, blockReport.getNumberOfBlocks());
int bytesAdded = addSomeBlocks(fsdataset);
blockReport = fsdataset.getBlockReport(bpid);
assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
}
// Inject blocks into an empty fsdataset
// - injecting the blocks we got above.
SimulatedFSDataset sfsdataset = getSimulatedFSDataset();
sfsdataset.injectBlocks(bpid, blockReport);
blockReport = sfsdataset.getBlockReport(bpid);
assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
assertEquals(blockIdToLen(b.getBlockId()), sfsdataset
.getLength(new ExtendedBlock(bpid, b)));
}
assertEquals(bytesAdded, sfsdataset.getDfsUsed());
assertEquals(sfsdataset.getCapacity()-bytesAdded, sfsdataset.getRemaining());
}
@Test
public void testInjectionNonEmpty() throws IOException {
SimulatedFSDataset fsdataset = getSimulatedFSDataset();
BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
assertEquals(0, blockReport.getNumberOfBlocks());
int bytesAdded = addSomeBlocks(fsdataset);
blockReport = fsdataset.getBlockReport(bpid);
assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
}
fsdataset = null;
    // Inject blocks into a non-empty fsdataset
// - injecting the blocks we got above.
SimulatedFSDataset sfsdataset = getSimulatedFSDataset();
    // Add some blocks whose block ids do not conflict with
// the ones we are going to inject.
bytesAdded += addSomeBlocks(sfsdataset, NUMBLOCKS+1, false);
    // The new dataset should now report exactly the blocks just added.
    assertEquals(NUMBLOCKS,
        sfsdataset.getBlockReport(bpid).getNumberOfBlocks());
sfsdataset.injectBlocks(bpid, blockReport);
blockReport = sfsdataset.getBlockReport(bpid);
assertEquals(NUMBLOCKS*2, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
assertEquals(blockIdToLen(b.getBlockId()), sfsdataset
.getLength(new ExtendedBlock(bpid, b)));
}
assertEquals(bytesAdded, sfsdataset.getDfsUsed());
assertEquals(sfsdataset.getCapacity()-bytesAdded, sfsdataset.getRemaining());
    // Now test that the dataset cannot accept the injected blocks
    // when it does not have sufficient capacity.
conf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, 10);
try {
sfsdataset = getSimulatedFSDataset();
sfsdataset.addBlockPool(bpid, conf);
sfsdataset.injectBlocks(bpid, blockReport);
assertTrue("Expected an IO exception", false);
} catch (IOException e) {
// ok - as expected
}
}
public void checkInvalidBlock(ExtendedBlock b) {
final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
assertFalse(fsdataset.isValidBlock(b));
try {
fsdataset.getLength(b);
assertTrue("Expected an IO exception", false);
} catch (IOException e) {
// ok - as expected
}
try {
fsdataset.getBlockInputStream(b);
assertTrue("Expected an IO exception", false);
} catch (IOException e) {
// ok - as expected
}
try {
fsdataset.finalizeBlock(b);
assertTrue("Expected an IO exception", false);
} catch (IOException e) {
// ok - as expected
}
}
@Test
public void testInValidBlocks() throws IOException {
final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
ExtendedBlock b = new ExtendedBlock(bpid, FIRST_BLK_ID, 5, 0);
checkInvalidBlock(b);
    // Now check an invalid block after adding some blocks
addSomeBlocks(fsdataset);
b = new ExtendedBlock(bpid, NUMBLOCKS + 99, 5, 0);
checkInvalidBlock(b);
}
@Test
public void testInvalidate() throws IOException {
final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
int bytesAdded = addSomeBlocks(fsdataset);
Block[] deleteBlocks = new Block[2];
deleteBlocks[0] = new Block(1, 0, 0);
deleteBlocks[1] = new Block(2, 0, 0);
fsdataset.invalidate(bpid, deleteBlocks);
checkInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[0]));
checkInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[1]));
long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed());
assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted, fsdataset.getRemaining());
// Now make sure the rest of the blocks are valid
for (int i=3; i <= NUMBLOCKS; ++i) {
Block b = new Block(i, 0, 0);
assertTrue(fsdataset.isValidBlock(new ExtendedBlock(bpid, b)));
}
}
private SimulatedFSDataset getSimulatedFSDataset() {
SimulatedFSDataset fsdataset = new SimulatedFSDataset(null, conf);
fsdataset.addBlockPool(bpid, conf);
return fsdataset;
}
}
| 12,974 | 36.938596 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.lang.management.ManagementFactory;
import java.util.List;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Assert;
import org.junit.Test;
/**
* Class for testing {@link DataNodeMXBean} implementation
*/
public class TestDataNodeMXBean {
@Test
public void testDataNodeMXBean() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
List<DataNode> datanodes = cluster.getDataNodes();
      Assert.assertEquals(1, datanodes.size());
DataNode datanode = datanodes.get(0);
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=DataNode,name=DataNodeInfo");
// get attribute "ClusterId"
String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
Assert.assertEquals(datanode.getClusterId(), clusterId);
// get attribute "Version"
String version = (String)mbs.getAttribute(mxbeanName, "Version");
Assert.assertEquals(datanode.getVersion(),version);
// get attribute "RpcPort"
String rpcPort = (String)mbs.getAttribute(mxbeanName, "RpcPort");
Assert.assertEquals(datanode.getRpcPort(),rpcPort);
// get attribute "HttpPort"
String httpPort = (String)mbs.getAttribute(mxbeanName, "HttpPort");
Assert.assertEquals(datanode.getHttpPort(),httpPort);
// get attribute "NamenodeAddresses"
String namenodeAddresses = (String)mbs.getAttribute(mxbeanName,
"NamenodeAddresses");
Assert.assertEquals(datanode.getNamenodeAddresses(),namenodeAddresses);
// get attribute "getVolumeInfo"
String volumeInfo = (String)mbs.getAttribute(mxbeanName, "VolumeInfo");
Assert.assertEquals(replaceDigits(datanode.getVolumeInfo()),
replaceDigits(volumeInfo));
// Ensure mxbean's XceiverCount is same as the DataNode's
// live value.
int xceiverCount = (Integer)mbs.getAttribute(mxbeanName,
"XceiverCount");
Assert.assertEquals(datanode.getXceiverCount(), xceiverCount);
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
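  // Illustrative sketch (not from the original test): the same MXBean is also
  // reachable over HTTP through the JMX JSON servlet, assuming the default
  // DataNode web UI is enabled (host and port are placeholders):
  //   curl 'http://<dn-host>:<http-port>/jmx?qry=Hadoop:service=DataNode,name=DataNodeInfo'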
private static String replaceDigits(final String s) {
return s.replaceAll("[0-9]+", "_DIGITS_");
}
}
| 3,299 | 39.243902 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesCombinedBlockReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
/**
* Runs all tests in BlockReportTestBase, sending one block report
* per DataNode. This tests that the NN can handle the legacy DN
* behavior where it presents itself as a single logical storage.
*/
public class TestNNHandlesCombinedBlockReport extends BlockReportTestBase {
@Override
protected void sendBlockReports(DatanodeRegistration dnR, String poolId,
StorageBlockReport[] reports) throws IOException {
LOG.info("Sending combined block reports for " + dnR);
cluster.getNameNodeRpc().blockReport(dnR, poolId, reports,
new BlockReportContext(1, 0, System.nanoTime(), 0L));
}
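  // For contrast, a hedged sketch of the per-storage variant (one RPC per
  // storage report; the loop below is illustrative, not from this file):
  //   for (int r = 0; r < reports.length; r++) {
  //     cluster.getNameNodeRpc().blockReport(dnR, poolId,
  //         new StorageBlockReport[] { reports[r] },
  //         new BlockReportContext(reports.length, r, System.nanoTime(), 0L));
  //   }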
}
| 1,732 | 40.261905 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCacheRevocation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import java.nio.ByteBuffer;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.hdfs.BlockReaderTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.apache.hadoop.util.NativeCodeLoader;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class TestFsDatasetCacheRevocation {
private static final Logger LOG = LoggerFactory.getLogger(
TestFsDatasetCacheRevocation.class);
private static CacheManipulator prevCacheManipulator;
private static TemporarySocketDirectory sockDir;
private static final int BLOCK_SIZE = 4096;
@Before
public void setUp() throws Exception {
prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
DomainSocket.disableBindPathValidation();
sockDir = new TemporarySocketDirectory();
}
@After
public void tearDown() throws Exception {
// Restore the original CacheManipulator
NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
sockDir.close();
}
private static Configuration getDefaultConf() {
HdfsConfiguration conf = new HdfsConfiguration();
conf.setLong(
DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 50);
conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 250);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
TestFsDatasetCache.CACHE_CAPACITY);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
new File(sockDir.getDir(), "sock").getAbsolutePath());
return conf;
}
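  // Hedged aside: the tests below drive caching through the programmatic API
  // (addCachePool/addCacheDirective); the equivalent cacheadmin CLI, assuming
  // a running cluster, would be roughly:
  //   hdfs cacheadmin -addPool pool
  //   hdfs cacheadmin -addDirective -path /test_file -pool pool -replication 1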
/**
* Test that when a client has a replica mmapped, we will not un-mlock that
* replica for a reasonable amount of time, even if an uncache request
* occurs.
*/
@Test(timeout=120000)
public void testPinning() throws Exception {
assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
Configuration conf = getDefaultConf();
// Set a really long revocation timeout, so that we won't reach it during
// this test.
conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS,
1800000L);
// Poll very often
conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS, 2L);
MiniDFSCluster cluster = null;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem dfs = cluster.getFileSystem();
// Create and cache a file.
final String TEST_FILE = "/test_file";
DFSTestUtil.createFile(dfs, new Path(TEST_FILE),
BLOCK_SIZE, (short)1, 0xcafe);
dfs.addCachePool(new CachePoolInfo("pool"));
long cacheDirectiveId =
dfs.addCacheDirective(new CacheDirectiveInfo.Builder().
setPool("pool").setPath(new Path(TEST_FILE)).
setReplication((short) 1).build());
FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);
// Mmap the file.
FSDataInputStream in = dfs.open(new Path(TEST_FILE));
ByteBuffer buf =
in.read(null, BLOCK_SIZE, EnumSet.noneOf(ReadOption.class));
// Attempt to uncache file. The file should still be cached.
dfs.removeCacheDirective(cacheDirectiveId);
Thread.sleep(500);
DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);
// Un-mmap the file. The file should be uncached after this.
in.releaseBuffer(buf);
DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
// Cleanup
in.close();
cluster.shutdown();
}
/**
* Test that when we have an uncache request, and the client refuses to release
* the replica for a long time, we will un-mlock it.
*/
@Test(timeout=120000)
public void testRevocation() throws Exception {
assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
BlockReaderTestUtil.enableHdfsCachingTracing();
BlockReaderTestUtil.enableShortCircuitShmTracing();
Configuration conf = getDefaultConf();
// Set a really short revocation timeout.
conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS, 250L);
// Poll very often
conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS, 2L);
MiniDFSCluster cluster = null;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem dfs = cluster.getFileSystem();
// Create and cache a file.
final String TEST_FILE = "/test_file2";
DFSTestUtil.createFile(dfs, new Path(TEST_FILE),
BLOCK_SIZE, (short)1, 0xcafe);
dfs.addCachePool(new CachePoolInfo("pool"));
long cacheDirectiveId =
dfs.addCacheDirective(new CacheDirectiveInfo.Builder().
setPool("pool").setPath(new Path(TEST_FILE)).
setReplication((short) 1).build());
FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);
// Mmap the file.
FSDataInputStream in = dfs.open(new Path(TEST_FILE));
ByteBuffer buf =
in.read(null, BLOCK_SIZE, EnumSet.noneOf(ReadOption.class));
// Attempt to uncache file. The file should get uncached.
LOG.info("removing cache directive {}", cacheDirectiveId);
dfs.removeCacheDirective(cacheDirectiveId);
LOG.info("finished removing cache directive {}", cacheDirectiveId);
Thread.sleep(1000);
DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
// Cleanup
in.releaseBuffer(buf);
in.close();
cluster.shutdown();
}
}
| 7,603 | 38.604167 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
import org.apache.hadoop.hdfs.MiniDFSNNTopology.NSConf;
import org.junit.Test;
import com.google.common.base.Joiner;
import com.google.common.collect.Sets;
/**
* Tests datanode refresh namenode list functionality.
*/
public class TestRefreshNamenodes {
private final int nnPort1 = 2221;
private final int nnPort2 = 2224;
private final int nnPort3 = 2227;
private final int nnPort4 = 2230;
@Test
public void testRefreshNamenodes() throws IOException {
// Start cluster with a single NN and DN
Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
MiniDFSNNTopology topology = new MiniDFSNNTopology()
.addNameservice(new NSConf("ns1").addNN(
new NNConf(null).setIpcPort(nnPort1)))
.setFederation(true);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(topology)
.build();
DataNode dn = cluster.getDataNodes().get(0);
assertEquals(1, dn.getAllBpOs().size());
cluster.addNameNode(conf, nnPort2);
assertEquals(2, dn.getAllBpOs().size());
cluster.addNameNode(conf, nnPort3);
assertEquals(3, dn.getAllBpOs().size());
cluster.addNameNode(conf, nnPort4);
// Ensure a BPOfferService in the datanodes corresponds to
// a namenode in the cluster
Set<InetSocketAddress> nnAddrsFromCluster = Sets.newHashSet();
for (int i = 0; i < 4; i++) {
assertTrue(nnAddrsFromCluster.add(
cluster.getNameNode(i).getNameNodeAddress()));
}
Set<InetSocketAddress> nnAddrsFromDN = Sets.newHashSet();
for (BPOfferService bpos : dn.getAllBpOs()) {
for (BPServiceActor bpsa : bpos.getBPServiceActors()) {
assertTrue(nnAddrsFromDN.add(bpsa.getNNSocketAddress()));
}
}
assertEquals("",
Joiner.on(",").join(
Sets.symmetricDifference(nnAddrsFromCluster, nnAddrsFromDN)));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
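  // Hedged aside: in a live cluster the same refresh is driven by updating the
  // DataNode's configuration files and running (host and port illustrative):
  //   hdfs dfsadmin -refreshNamenodes <datanode-host>:<ipc-port>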
}
| 3,258 | 32.597938 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import static org.apache.hadoop.security.SecurityUtilTestHelper.isExternalKdcRunning;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
/**
* This test starts a 1 NameNode 1 DataNode MiniDFSCluster with
* kerberos authentication enabled using user-specified KDC,
* principals, and keytabs.
*
* A secure DataNode has to be started by root, so this test needs to
* be run by root.
*
* To run, users must specify the following system properties:
* externalKdc=true
* java.security.krb5.conf
* dfs.namenode.kerberos.principal
* dfs.namenode.kerberos.internal.spnego.principal
* dfs.namenode.keytab.file
* dfs.datanode.kerberos.principal
* dfs.datanode.keytab.file
*/
public class TestStartSecureDataNode {
final static private int NUM_OF_DATANODES = 1;
@Before
public void testExternalKdcRunning() {
// Tests are skipped if external KDC is not running.
Assume.assumeTrue(isExternalKdcRunning());
}
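  // Illustrative invocation (principals, paths, and realm are placeholders):
  //   mvn test -Dtest=TestStartSecureDataNode -DexternalKdc=true \
  //     -Djava.security.krb5.conf=/etc/krb5.conf \
  //     -Ddfs.namenode.kerberos.principal=nn/_HOST@EXAMPLE.COM \
  //     -Ddfs.namenode.kerberos.internal.spnego.principal=HTTP/_HOST@EXAMPLE.COM \
  //     -Ddfs.namenode.keytab.file=/path/to/nn.keytab \
  //     -Ddfs.datanode.kerberos.principal=dn/_HOST@EXAMPLE.COM \
  //     -Ddfs.datanode.keytab.file=/path/to/dn.keytab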
@Test
public void testSecureNameNode() throws IOException, InterruptedException {
MiniDFSCluster cluster = null;
try {
String nnPrincipal =
System.getProperty("dfs.namenode.kerberos.principal");
String nnSpnegoPrincipal =
System.getProperty("dfs.namenode.kerberos.internal.spnego.principal");
String nnKeyTab = System.getProperty("dfs.namenode.keytab.file");
assertNotNull("NameNode principal was not specified", nnPrincipal);
assertNotNull("NameNode SPNEGO principal was not specified",
nnSpnegoPrincipal);
assertNotNull("NameNode keytab was not specified", nnKeyTab);
String dnPrincipal = System.getProperty("dfs.datanode.kerberos.principal");
String dnKeyTab = System.getProperty("dfs.datanode.keytab.file");
assertNotNull("DataNode principal was not specified", dnPrincipal);
assertNotNull("DataNode keytab was not specified", dnKeyTab);
Configuration conf = new HdfsConfiguration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, nnPrincipal);
conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
nnSpnegoPrincipal);
conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nnKeyTab);
conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, dnPrincipal);
conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, dnKeyTab);
// Secure DataNode requires using ports lower than 1024.
conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:1004");
conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:1006");
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, "700");
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_OF_DATANODES)
.checkDataNodeAddrConfig(true)
.build();
cluster.waitActive();
assertTrue(cluster.isDataNodeUp());
} catch (Exception ex) {
ex.printStackTrace();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 4,716 | 38.974576 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeInitStorage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.junit.Test;
import static org.hamcrest.core.IsNot.not;
import static org.junit.Assert.*;
/**
* Test to verify that the DataNode Uuid is correctly initialized before
* FsDataSet initialization.
*/
public class TestDataNodeInitStorage {
public static final Log LOG = LogFactory.getLog(TestDataNodeInitStorage.class);
static private class SimulatedFsDatasetVerifier extends SimulatedFSDataset {
static class Factory extends FsDatasetSpi.Factory<SimulatedFSDataset> {
@Override
public SimulatedFsDatasetVerifier newInstance(
DataNode datanode, DataStorage storage,
Configuration conf) throws IOException {
return new SimulatedFsDatasetVerifier(storage, conf);
}
@Override
public boolean isSimulated() {
return true;
}
}
public static void setFactory(Configuration conf) {
conf.set(DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
Factory.class.getName());
}
// This constructor does the actual verification by ensuring that
// the DatanodeUuid is initialized.
public SimulatedFsDatasetVerifier(DataStorage storage, Configuration conf) {
super(storage, conf);
LOG.info("Assigned DatanodeUuid is " + storage.getDatanodeUuid());
assert(storage.getDatanodeUuid() != null);
assert(storage.getDatanodeUuid().length() != 0);
}
}
@Test (timeout = 60000)
public void testDataNodeInitStorage() throws Throwable {
// Create configuration to use SimulatedFsDatasetVerifier#Factory.
Configuration conf = new HdfsConfiguration();
SimulatedFsDatasetVerifier.setFactory(conf);
// Start a cluster so that SimulatedFsDatasetVerifier constructor is
// invoked.
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
cluster.shutdown();
}
}
| 3,109 | 34.340909 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestDataNodeMultipleRegistrations {
private static final Log LOG =
LogFactory.getLog(TestDataNodeMultipleRegistrations.class);
Configuration conf;
@Before
public void setUp() throws Exception {
conf = new HdfsConfiguration();
}
/**
* start multiple NNs and single DN and verifies per BP registrations and
* handshakes.
*
* @throws IOException
*/
@Test
public void test2NNRegistration() throws IOException {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
.build();
try {
cluster.waitActive();
NameNode nn1 = cluster.getNameNode(0);
NameNode nn2 = cluster.getNameNode(1);
assertNotNull("cannot create nn1", nn1);
assertNotNull("cannot create nn2", nn2);
String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
String bpid2 = FSImageTestUtil.getFSImage(nn2).getBlockPoolID();
String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID();
String cid2 = FSImageTestUtil.getFSImage(nn2).getClusterID();
int lv1 =FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
int lv2 = FSImageTestUtil.getFSImage(nn2).getLayoutVersion();
int ns1 = FSImageTestUtil.getFSImage(nn1).getNamespaceID();
int ns2 = FSImageTestUtil.getFSImage(nn2).getNamespaceID();
assertNotSame("namespace ids should be different", ns1, ns2);
LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri="
+ nn1.getNameNodeAddress());
LOG.info("nn2: lv=" + lv2 + ";cid=" + cid2 + ";bpid=" + bpid2 + ";uri="
+ nn2.getNameNodeAddress());
// check number of volumes in fsdataset
DataNode dn = cluster.getDataNodes().get(0);
final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
int i = 0;
for (Map.Entry<String, Object> e : volInfos.entrySet()) {
LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
}
// number of volumes should be 2 - [data1, data2]
assertEquals("number of volumes is wrong", 2, volInfos.size());
for (BPOfferService bpos : dn.getAllBpOs()) {
LOG.info("BP: " + bpos);
}
BPOfferService bpos1 = dn.getAllBpOs().get(0);
BPOfferService bpos2 = dn.getAllBpOs().get(1);
// The order of bpos is not guaranteed, so fix the order
if (getNNSocketAddress(bpos1).equals(nn2.getNameNodeAddress())) {
BPOfferService tmp = bpos1;
bpos1 = bpos2;
bpos2 = tmp;
}
assertEquals("wrong nn address", getNNSocketAddress(bpos1),
nn1.getNameNodeAddress());
assertEquals("wrong nn address", getNNSocketAddress(bpos2),
nn2.getNameNodeAddress());
assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1);
assertEquals("wrong bpid", bpos2.getBlockPoolId(), bpid2);
assertEquals("wrong cid", dn.getClusterId(), cid1);
assertEquals("cid should be same", cid2, cid1);
assertEquals("namespace should be same",
bpos1.bpNSInfo.namespaceID, ns1);
assertEquals("namespace should be same",
bpos2.bpNSInfo.namespaceID, ns2);
} finally {
cluster.shutdown();
}
}
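  // Hedged aside: outside MiniDFSCluster, the two-nameservice setup exercised
  // above corresponds roughly to this configuration (values illustrative):
  //   dfs.nameservices = ns1,ns2
  //   dfs.namenode.rpc-address.ns1 = nn1-host:8020
  //   dfs.namenode.rpc-address.ns2 = nn2-host:8020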
private static InetSocketAddress getNNSocketAddress(BPOfferService bpos) {
List<BPServiceActor> actors = bpos.getBPServiceActors();
assertEquals(1, actors.size());
return actors.get(0).getNNSocketAddress();
}
/**
* starts single nn and single dn and verifies registration and handshake
*
* @throws IOException
*/
@Test
public void testFedSingleNN() throws IOException {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.nameNodePort(9927).build();
try {
NameNode nn1 = cluster.getNameNode();
assertNotNull("cannot create nn1", nn1);
String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID();
int lv1 = FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri="
+ nn1.getNameNodeAddress());
      // check number of volumes in fsdataset
DataNode dn = cluster.getDataNodes().get(0);
final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
int i = 0;
for (Map.Entry<String, Object> e : volInfos.entrySet()) {
LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
}
// number of volumes should be 2 - [data1, data2]
assertEquals("number of volumes is wrong", 2, volInfos.size());
for (BPOfferService bpos : dn.getAllBpOs()) {
LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration + "; sid="
+ bpos.bpRegistration.getDatanodeUuid() + "; nna=" +
getNNSocketAddress(bpos));
}
// try block report
BPOfferService bpos1 = dn.getAllBpOs().get(0);
bpos1.triggerBlockReportForTests();
assertEquals("wrong nn address",
getNNSocketAddress(bpos1),
nn1.getNameNodeAddress());
assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1);
assertEquals("wrong cid", dn.getClusterId(), cid1);
cluster.shutdown();
// Ensure all the BPOfferService threads are shutdown
assertEquals(0, dn.getAllBpOs().size());
cluster = null;
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testClusterIdMismatch() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
.build();
try {
cluster.waitActive();
DataNode dn = cluster.getDataNodes().get(0);
List<BPOfferService> bposs = dn.getAllBpOs();
LOG.info("dn bpos len (should be 2):" + bposs.size());
Assert.assertEquals("should've registered with two namenodes", bposs.size(),2);
// add another namenode
cluster.addNameNode(conf, 9938);
      Thread.sleep(500); // let's wait for the registration to happen
bposs = dn.getAllBpOs();
LOG.info("dn bpos len (should be 3):" + bposs.size());
Assert.assertEquals("should've registered with three namenodes", bposs.size(),3);
      // change the cluster id and add another Namenode
StartupOption.FORMAT.setClusterId("DifferentCID");
cluster.addNameNode(conf, 9948);
NameNode nn4 = cluster.getNameNode(3);
assertNotNull("cannot create nn4", nn4);
      Thread.sleep(500); // let's wait for the registration to happen
bposs = dn.getAllBpOs();
LOG.info("dn bpos len (still should be 3):" + bposs.size());
Assert.assertEquals("should've registered with three namenodes", 3, bposs.size());
} finally {
cluster.shutdown();
}
}
@Test(timeout = 20000)
public void testClusterIdMismatchAtStartupWithHA() throws Exception {
MiniDFSNNTopology top = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
.addNN(new MiniDFSNNTopology.NNConf("nn0"))
.addNN(new MiniDFSNNTopology.NNConf("nn1")))
.addNameservice(new MiniDFSNNTopology.NSConf("ns2")
.addNN(new MiniDFSNNTopology.NNConf("nn2").setClusterId("bad-cid"))
.addNN(new MiniDFSNNTopology.NNConf("nn3").setClusterId("bad-cid")));
top.setFederation(true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(top)
.numDataNodes(0).build();
try {
cluster.startDataNodes(conf, 1, true, null, null);
// let the initialization be complete
Thread.sleep(10000);
DataNode dn = cluster.getDataNodes().get(0);
assertTrue("Datanode should be running", dn.isDatanodeUp());
assertEquals("Only one BPOfferService should be running", 1,
dn.getAllBpOs().size());
} finally {
cluster.shutdown();
}
}
@Test
public void testDNWithInvalidStorageWithHA() throws Exception {
MiniDFSNNTopology top = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
.addNN(new MiniDFSNNTopology.NNConf("nn0").setClusterId("cluster-1"))
.addNN(new MiniDFSNNTopology.NNConf("nn1").setClusterId("cluster-1")));
top.setFederation(true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(top)
.numDataNodes(0).build();
try {
cluster.startDataNodes(conf, 1, true, null, null);
// let the initialization be complete
Thread.sleep(10000);
DataNode dn = cluster.getDataNodes().get(0);
assertTrue("Datanode should be running", dn.isDatanodeUp());
assertEquals("BPOfferService should be running", 1,
dn.getAllBpOs().size());
DataNodeProperties dnProp = cluster.stopDataNode(0);
cluster.getNameNode(0).stop();
cluster.getNameNode(1).stop();
Configuration nn1 = cluster.getConfiguration(0);
Configuration nn2 = cluster.getConfiguration(1);
// setting up invalid cluster
StartupOption.FORMAT.setClusterId("cluster-2");
DFSTestUtil.formatNameNode(nn1);
MiniDFSCluster.copyNameDirs(FSNamesystem.getNamespaceDirs(nn1),
FSNamesystem.getNamespaceDirs(nn2), nn2);
cluster.restartNameNode(0, false);
cluster.restartNameNode(1, false);
cluster.restartDataNode(dnProp);
// let the initialization be complete
Thread.sleep(10000);
dn = cluster.getDataNodes().get(0);
assertFalse("Datanode should have shutdown as only service failed",
dn.isDatanodeUp());
} finally {
cluster.shutdown();
}
}
@Test
public void testMiniDFSClusterWithMultipleNN() throws IOException {
Configuration conf = new HdfsConfiguration();
// start Federated cluster and add a node.
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
.build();
// add a node
try {
cluster.waitActive();
Assert.assertEquals("(1)Should be 2 namenodes", 2, cluster.getNumNameNodes());
cluster.addNameNode(conf, 0);
Assert.assertEquals("(1)Should be 3 namenodes", 3, cluster.getNumNameNodes());
} catch (IOException ioe) {
Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
} finally {
cluster.shutdown();
}
// 2. start with Federation flag set
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1))
.build();
try {
Assert.assertNotNull(cluster);
cluster.waitActive();
Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
// add a node
cluster.addNameNode(conf, 0);
Assert.assertEquals("(2)Should be 2 namenodes", 2, cluster.getNumNameNodes());
} catch (IOException ioe) {
Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
} finally {
cluster.shutdown();
}
// 3. start non-federated
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).build();
// add a node
try {
cluster.waitActive();
Assert.assertNotNull(cluster);
Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
cluster.addNameNode(conf, 9929);
Assert.fail("shouldn't be able to add another NN to non federated cluster");
} catch (IOException e) {
// correct
Assert.assertTrue(e.getMessage().startsWith("cannot add namenode"));
Assert.assertEquals("(3)Should be 1 namenodes", 1, cluster.getNumNameNodes());
} finally {
cluster.shutdown();
}
}
}
| 13,924 | 37.046448 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockMissingException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.hamcrest.CoreMatchers.anyOf;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.timeout;
public class TestDataNodeHotSwapVolumes {
private static final Log LOG = LogFactory.getLog(
TestDataNodeHotSwapVolumes.class);
private static final int BLOCK_SIZE = 512;
private MiniDFSCluster cluster;
@After
public void tearDown() {
shutdown();
}
private void startDFSCluster(int numNameNodes, int numDataNodes)
throws IOException {
shutdown();
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
/*
* Lower the DN heartbeat, DF rate, and recheck interval to one second
* so state about failures and datanode death propagates faster.
*/
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
1000);
/* Allow 1 volume failure */
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
MiniDFSNNTopology nnTopology =
MiniDFSNNTopology.simpleFederatedTopology(numNameNodes);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(nnTopology)
.numDataNodes(numDataNodes)
.build();
cluster.waitActive();
}
private void shutdown() {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
private void createFile(Path path, int numBlocks)
throws IOException, InterruptedException, TimeoutException {
final short replicateFactor = 1;
createFile(path, numBlocks, replicateFactor);
}
private void createFile(Path path, int numBlocks, short replicateFactor)
throws IOException, InterruptedException, TimeoutException {
createFile(0, path, numBlocks, replicateFactor);
}
private void createFile(int fsIdx, Path path, int numBlocks)
throws IOException, InterruptedException, TimeoutException {
final short replicateFactor = 1;
createFile(fsIdx, path, numBlocks, replicateFactor);
}
private void createFile(int fsIdx, Path path, int numBlocks,
short replicateFactor)
throws IOException, TimeoutException, InterruptedException {
final int seed = 0;
final DistributedFileSystem fs = cluster.getFileSystem(fsIdx);
DFSTestUtil.createFile(fs, path, BLOCK_SIZE * numBlocks,
replicateFactor, seed);
DFSTestUtil.waitReplication(fs, path, replicateFactor);
}
/**
   * Verify that a file has the expected length.
*/
private static void verifyFileLength(FileSystem fs, Path path, int numBlocks)
throws IOException {
FileStatus status = fs.getFileStatus(path);
assertEquals(numBlocks * BLOCK_SIZE, status.getLen());
}
/** Return the number of replicas for a given block in the file. */
private static int getNumReplicas(FileSystem fs, Path file,
int blockIdx) throws IOException {
BlockLocation locs[] = fs.getFileBlockLocations(file, 0, Long.MAX_VALUE);
return locs.length < blockIdx + 1 ? 0 : locs[blockIdx].getNames().length;
}
/**
   * Wait for the block to have exactly the expected number of replicas.
*/
private static void waitReplication(FileSystem fs, Path file, int blockIdx,
int numReplicas)
throws IOException, TimeoutException, InterruptedException {
int attempts = 50; // Wait 5 seconds.
while (attempts > 0) {
int actualReplicas = getNumReplicas(fs, file, blockIdx);
if (actualReplicas == numReplicas) {
return;
}
System.out.printf("Block %d of file %s has %d replicas (desired %d).\n",
blockIdx, file.toString(), actualReplicas, numReplicas);
Thread.sleep(100);
attempts--;
}
    throw new TimeoutException("Timed out waiting for the " + blockIdx + "-th block"
+ " of " + file + " to have " + numReplicas + " replicas.");
}
/** Parses data dirs from DataNode's configuration. */
private static List<String> getDataDirs(DataNode datanode) {
return new ArrayList<String>(datanode.getConf().getTrimmedStringCollection(
DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
}
  /** Force the DataNode to report deleted blocks immediately. */
private static void triggerDeleteReport(DataNode datanode)
throws IOException {
datanode.scheduleAllBlockReport(0);
DataNodeTestUtils.triggerDeletionReport(datanode);
}
@Test
public void testParseChangedVolumes() throws IOException {
startDFSCluster(1, 1);
DataNode dn = cluster.getDataNodes().get(0);
Configuration conf = dn.getConf();
String oldPaths = conf.get(DFS_DATANODE_DATA_DIR_KEY);
List<StorageLocation> oldLocations = new ArrayList<StorageLocation>();
for (String path : oldPaths.split(",")) {
oldLocations.add(StorageLocation.parse(path));
}
assertFalse(oldLocations.isEmpty());
String newPaths = oldLocations.get(0).getFile().getAbsolutePath() +
",/foo/path1,/foo/path2";
DataNode.ChangedVolumes changedVolumes =
dn.parseChangedVolumes(newPaths);
List<StorageLocation> newVolumes = changedVolumes.newLocations;
assertEquals(2, newVolumes.size());
assertEquals(new File("/foo/path1").getAbsolutePath(),
newVolumes.get(0).getFile().getAbsolutePath());
assertEquals(new File("/foo/path2").getAbsolutePath(),
newVolumes.get(1).getFile().getAbsolutePath());
List<StorageLocation> removedVolumes = changedVolumes.deactivateLocations;
assertEquals(1, removedVolumes.size());
assertEquals(oldLocations.get(1).getFile(),
removedVolumes.get(0).getFile());
assertEquals(1, changedVolumes.unchangedLocations.size());
assertEquals(oldLocations.get(0).getFile(),
changedVolumes.unchangedLocations.get(0).getFile());
}
@Test
public void testParseChangedVolumesFailures() throws IOException {
startDFSCluster(1, 1);
DataNode dn = cluster.getDataNodes().get(0);
try {
dn.parseChangedVolumes("");
fail("Should throw IOException: empty inputs.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("No directory is specified.", e);
}
}
/** Add volumes to the first DataNode. */
private void addVolumes(int numNewVolumes)
throws ReconfigurationException, IOException {
File dataDir = new File(cluster.getDataDirectory());
DataNode dn = cluster.getDataNodes().get(0); // First DataNode.
Configuration conf = dn.getConf();
String oldDataDir = conf.get(DFS_DATANODE_DATA_DIR_KEY);
List<File> newVolumeDirs = new ArrayList<File>();
StringBuilder newDataDirBuf = new StringBuilder(oldDataDir);
int startIdx = oldDataDir.split(",").length + 1;
    // Find the first available (unused) directory name for a data volume.
while (true) {
File volumeDir = new File(dataDir, "data" + startIdx);
if (!volumeDir.exists()) {
break;
}
startIdx++;
}
for (int i = startIdx; i < startIdx + numNewVolumes; i++) {
File volumeDir = new File(dataDir, "data" + String.valueOf(i));
newVolumeDirs.add(volumeDir);
volumeDir.mkdirs();
newDataDirBuf.append(",");
newDataDirBuf.append(
StorageLocation.parse(volumeDir.toString()).toString());
}
String newDataDir = newDataDirBuf.toString();
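    // Trigger the hot swap: reconfigurePropertyImpl() parses the changed
    // volumes and brings the new directories online without restarting the
    // DataNode.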
dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newDataDir);
// Verify the configuration value is appropriately set.
String[] effectiveDataDirs = conf.get(DFS_DATANODE_DATA_DIR_KEY).split(",");
String[] expectDataDirs = newDataDir.split(",");
assertEquals(expectDataDirs.length, effectiveDataDirs.length);
for (int i = 0; i < expectDataDirs.length; i++) {
StorageLocation expectLocation = StorageLocation.parse(expectDataDirs[i]);
StorageLocation effectiveLocation =
StorageLocation.parse(effectiveDataDirs[i]);
assertEquals(expectLocation.getStorageType(),
effectiveLocation.getStorageType());
assertEquals(expectLocation.getFile().getCanonicalFile(),
effectiveLocation.getFile().getCanonicalFile());
}
// Check that all newly created volumes are appropriately formatted.
for (File volumeDir : newVolumeDirs) {
File curDir = new File(volumeDir, "current");
assertTrue(curDir.exists());
assertTrue(curDir.isDirectory());
}
}
private List<List<Integer>> getNumBlocksReport(int namesystemIdx) {
List<List<Integer>> results = new ArrayList<List<Integer>>();
final String bpid = cluster.getNamesystem(namesystemIdx).getBlockPoolId();
List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
cluster.getAllBlockReports(bpid);
for (Map<DatanodeStorage, BlockListAsLongs> datanodeReport : blockReports) {
List<Integer> numBlocksPerDN = new ArrayList<Integer>();
for (BlockListAsLongs blocks : datanodeReport.values()) {
numBlocksPerDN.add(blocks.getNumberOfBlocks());
}
results.add(numBlocksPerDN);
}
return results;
}
/**
* Test adding one volume on a running MiniDFSCluster with only one NameNode.
*/
@Test(timeout=60000)
public void testAddOneNewVolume()
throws IOException, ReconfigurationException,
InterruptedException, TimeoutException {
startDFSCluster(1, 1);
String bpid = cluster.getNamesystem().getBlockPoolId();
final int numBlocks = 10;
addVolumes(1);
Path testFile = new Path("/test");
createFile(testFile, numBlocks);
List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
cluster.getAllBlockReports(bpid);
assertEquals(1, blockReports.size()); // 1 DataNode
assertEquals(3, blockReports.get(0).size()); // 3 volumes
// FSVolumeList uses Round-Robin block chooser by default. Thus the new
// blocks should be evenly located in all volumes.
int minNumBlocks = Integer.MAX_VALUE;
int maxNumBlocks = Integer.MIN_VALUE;
for (BlockListAsLongs blockList : blockReports.get(0).values()) {
minNumBlocks = Math.min(minNumBlocks, blockList.getNumberOfBlocks());
maxNumBlocks = Math.max(maxNumBlocks, blockList.getNumberOfBlocks());
}
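    // With the round-robin chooser, per-volume block counts should differ by
    // at most one.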
    assertTrue(Math.abs(maxNumBlocks - minNumBlocks) <= 1);
verifyFileLength(cluster.getFileSystem(), testFile, numBlocks);
}
@Test(timeout=60000)
public void testAddVolumesDuringWrite()
throws IOException, InterruptedException, TimeoutException,
ReconfigurationException {
startDFSCluster(1, 1);
String bpid = cluster.getNamesystem().getBlockPoolId();
Path testFile = new Path("/test");
createFile(testFile, 4); // Each volume has 2 blocks.
addVolumes(2);
// Continue to write the same file, thus the new volumes will have blocks.
DFSTestUtil.appendFile(cluster.getFileSystem(), testFile, BLOCK_SIZE * 8);
verifyFileLength(cluster.getFileSystem(), testFile, 8 + 4);
    // After appending, each of the two original volumes holds 4 blocks (2
    // initial plus 2 appended) and each new volume holds 2 appended blocks,
    // i.e. [2, 2, 4, 4] once sorted.
List<Integer> expectedNumBlocks = Arrays.asList(2, 2, 4, 4);
List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
cluster.getAllBlockReports(bpid);
assertEquals(1, blockReports.size()); // 1 DataNode
assertEquals(4, blockReports.get(0).size()); // 4 volumes
Map<DatanodeStorage, BlockListAsLongs> dnReport =
blockReports.get(0);
List<Integer> actualNumBlocks = new ArrayList<Integer>();
for (BlockListAsLongs blockList : dnReport.values()) {
actualNumBlocks.add(blockList.getNumberOfBlocks());
}
Collections.sort(actualNumBlocks);
assertEquals(expectedNumBlocks, actualNumBlocks);
}
@Test(timeout=60000)
public void testAddVolumesToFederationNN()
throws IOException, TimeoutException, InterruptedException,
ReconfigurationException {
    // Starts a cluster with 2 NameNodes and 1 DataNode. The DataNode has 2
    // volumes.
final int numNameNodes = 2;
final int numDataNodes = 1;
startDFSCluster(numNameNodes, numDataNodes);
Path testFile = new Path("/test");
// Create a file on the first namespace with 4 blocks.
createFile(0, testFile, 4);
// Create a file on the second namespace with 4 blocks.
createFile(1, testFile, 4);
// Add 2 volumes to the first DataNode.
final int numNewVolumes = 2;
addVolumes(numNewVolumes);
// Append to the file on the first namespace.
DFSTestUtil.appendFile(cluster.getFileSystem(0), testFile, BLOCK_SIZE * 8);
List<List<Integer>> actualNumBlocks = getNumBlocksReport(0);
assertEquals(cluster.getDataNodes().size(), actualNumBlocks.size());
List<Integer> blocksOnFirstDN = actualNumBlocks.get(0);
Collections.sort(blocksOnFirstDN);
assertEquals(Arrays.asList(2, 2, 4, 4), blocksOnFirstDN);
// Verify the second namespace also has the new volumes and they are empty.
actualNumBlocks = getNumBlocksReport(1);
assertEquals(4, actualNumBlocks.get(0).size());
assertEquals(numNewVolumes,
Collections.frequency(actualNumBlocks.get(0), 0));
}
@Test(timeout=60000)
public void testRemoveOneVolume()
throws ReconfigurationException, InterruptedException, TimeoutException,
IOException {
startDFSCluster(1, 1);
final short replFactor = 1;
Path testFile = new Path("/test");
createFile(testFile, 10, replFactor);
DataNode dn = cluster.getDataNodes().get(0);
Collection<String> oldDirs = getDataDirs(dn);
String newDirs = oldDirs.iterator().next(); // Keep the first volume.
dn.reconfigurePropertyImpl(
DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
assertFileLocksReleased(
new ArrayList<String>(oldDirs).subList(1, oldDirs.size()));
dn.scheduleAllBlockReport(0);
try {
DFSTestUtil.readFile(cluster.getFileSystem(), testFile);
fail("Expect to throw BlockMissingException.");
} catch (BlockMissingException e) {
GenericTestUtils.assertExceptionContains("Could not obtain block", e);
}
Path newFile = new Path("/newFile");
createFile(newFile, 6);
String bpid = cluster.getNamesystem().getBlockPoolId();
List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
cluster.getAllBlockReports(bpid);
assertEquals((int)replFactor, blockReports.size());
BlockListAsLongs blocksForVolume1 =
blockReports.get(0).values().iterator().next();
    // The remaining volume holds half of testFile's blocks (round-robin
    // placement over the original two volumes) plus all of newFile's blocks.
assertEquals(10 / 2 + 6, blocksForVolume1.getNumberOfBlocks());
}
@Test(timeout=60000)
public void testReplicatingAfterRemoveVolume()
throws InterruptedException, TimeoutException, IOException,
ReconfigurationException {
startDFSCluster(1, 2);
final FileSystem fs = cluster.getFileSystem();
final short replFactor = 2;
Path testFile = new Path("/test");
createFile(testFile, 4, replFactor);
DataNode dn = cluster.getDataNodes().get(0);
Collection<String> oldDirs = getDataDirs(dn);
String newDirs = oldDirs.iterator().next(); // Keep the first volume.
dn.reconfigurePropertyImpl(
DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
assertFileLocksReleased(
new ArrayList<String>(oldDirs).subList(1, oldDirs.size()));
triggerDeleteReport(dn);
waitReplication(fs, testFile, 1, 1);
DFSTestUtil.waitReplication(fs, testFile, replFactor);
}
@Test
public void testAddVolumeFailures() throws IOException {
startDFSCluster(1, 1);
final String dataDir = cluster.getDataDirectory();
DataNode dn = cluster.getDataNodes().get(0);
List<String> newDirs = Lists.newArrayList();
final int NUM_NEW_DIRS = 4;
for (int i = 0; i < NUM_NEW_DIRS; i++) {
File newVolume = new File(dataDir, "new_vol" + i);
newDirs.add(newVolume.toString());
if (i % 2 == 0) {
// Make addVolume() fail.
newVolume.createNewFile();
}
}
String newValue = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY) + "," +
Joiner.on(",").join(newDirs);
try {
dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newValue);
fail("Expect to throw IOException.");
} catch (ReconfigurationException e) {
String errorMessage = e.getCause().getMessage();
String messages[] = errorMessage.split("\\r?\\n");
assertEquals(2, messages.length);
assertThat(messages[0], containsString("new_vol0"));
assertThat(messages[1], containsString("new_vol2"));
}
// Make sure that vol0 and vol2's metadata are not left in memory.
FsDatasetSpi<?> dataset = dn.getFSDataset();
try (FsDatasetSpi.FsVolumeReferences volumes =
dataset.getFsVolumeReferences()) {
for (FsVolumeSpi volume : volumes) {
assertThat(volume.getBasePath(), is(not(anyOf(
is(newDirs.get(0)), is(newDirs.get(2))))));
}
}
DataStorage storage = dn.getStorage();
for (int i = 0; i < storage.getNumStorageDirs(); i++) {
Storage.StorageDirectory sd = storage.getStorageDir(i);
assertThat(sd.getRoot().toString(),
is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
}
// The newly effective conf does not have vol0 and vol2.
String[] effectiveVolumes =
dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY).split(",");
assertEquals(4, effectiveVolumes.length);
for (String ev : effectiveVolumes) {
assertThat(StorageLocation.parse(ev).getFile().getCanonicalPath(),
is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
}
}
/**
* Asserts that the storage lock file in each given directory has been
* released. This method works by trying to acquire the lock file itself. If
* locking fails here, then the main code must have failed to release it.
*
* @param dirs every storage directory to check
* @throws IOException if there is an unexpected I/O error
*/
private static void assertFileLocksReleased(Collection<String> dirs)
throws IOException {
for (String dir: dirs) {
try {
FsDatasetTestUtil.assertFileLockReleased(dir);
} catch (IOException e) {
LOG.warn(e);
}
}
}
@Test(timeout=180000)
public void testRemoveVolumeBeingWritten()
throws InterruptedException, TimeoutException, ReconfigurationException,
IOException, BrokenBarrierException {
    // Test removing a volume on each of the DataNodes in the pipeline in turn.
for (int i = 0; i < 3; i++) {
testRemoveVolumeBeingWrittenForDatanode(i);
}
}
/**
   * Test removing a data volume from a particular DataNode while the volume
   * is actively being written to.
   * @param dataNodeIdx the index of the DataNode from which to remove a volume.
*/
private void testRemoveVolumeBeingWrittenForDatanode(int dataNodeIdx)
throws IOException, ReconfigurationException, TimeoutException,
InterruptedException, BrokenBarrierException {
// Starts DFS cluster with 3 DataNodes to form a pipeline.
startDFSCluster(1, 3);
final short REPLICATION = 3;
final DataNode dn = cluster.getDataNodes().get(dataNodeIdx);
final FileSystem fs = cluster.getFileSystem();
final Path testFile = new Path("/test");
final long lastTimeDiskErrorCheck = dn.getLastDiskErrorCheck();
FSDataOutputStream out = fs.create(testFile, REPLICATION);
Random rb = new Random(0);
byte[] writeBuf = new byte[BLOCK_SIZE / 2]; // half of the block.
rb.nextBytes(writeBuf);
out.write(writeBuf);
out.hflush();
    // Make FsDatasetSpi#finalizeBlock a time-consuming operation, so that if
    // BlockReceiver releases its volume reference before finalizeBlock()
    // completes, the blocks on the volume are removed and finalizeBlock()
    // throws an IOException.
final FsDatasetSpi<? extends FsVolumeSpi> data = dn.data;
dn.data = Mockito.spy(data);
doAnswer(new Answer<Object>() {
public Object answer(InvocationOnMock invocation)
throws IOException, InterruptedException {
Thread.sleep(1000);
        // Delegate to the real FsDatasetImpl#finalizeBlock to verify that
        // the block is not removed, since the volume reference should not
        // have been released at this point.
data.finalizeBlock((ExtendedBlock) invocation.getArguments()[0]);
return null;
}
}).when(dn.data).finalizeBlock(any(ExtendedBlock.class));
final CyclicBarrier barrier = new CyclicBarrier(2);
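    // A two-party barrier synchronizes the reconfiguration thread with the
    // writer below, so the volume removal races with the in-flight write.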
List<String> oldDirs = getDataDirs(dn);
final String newDirs = oldDirs.get(1); // Remove the first volume.
final List<Exception> exceptions = new ArrayList<>();
Thread reconfigThread = new Thread() {
public void run() {
try {
barrier.await();
dn.reconfigurePropertyImpl(
DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
} catch (ReconfigurationException |
InterruptedException |
BrokenBarrierException e) {
exceptions.add(e);
}
}
};
reconfigThread.start();
barrier.await();
rb.nextBytes(writeBuf);
out.write(writeBuf);
out.hflush();
out.close();
reconfigThread.join();
// Verify the file has sufficient replications.
DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
// Read the content back
byte[] content = DFSTestUtil.readFileBuffer(fs, testFile);
assertEquals(BLOCK_SIZE, content.length);
    // If an IOException is thrown from BlockReceiver#run, it triggers
    // DataNode#checkDiskError(). So we can check whether checkDiskError() was
    // called to determine whether BlockReceiver#run hit an IOException.
assertEquals(lastTimeDiskErrorCheck, dn.getLastDiskErrorCheck());
if (!exceptions.isEmpty()) {
throw new IOException(exceptions.get(0).getCause());
}
}
@Test(timeout=60000)
public void testAddBackRemovedVolume()
throws IOException, TimeoutException, InterruptedException,
ReconfigurationException {
startDFSCluster(1, 2);
// Create some data on every volume.
createFile(new Path("/test"), 32);
DataNode dn = cluster.getDataNodes().get(0);
Configuration conf = dn.getConf();
String oldDataDir = conf.get(DFS_DATANODE_DATA_DIR_KEY);
String keepDataDir = oldDataDir.split(",")[0];
String removeDataDir = oldDataDir.split(",")[1];
dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, keepDataDir);
for (int i = 0; i < cluster.getNumNameNodes(); i++) {
String bpid = cluster.getNamesystem(i).getBlockPoolId();
BlockPoolSliceStorage bpsStorage =
dn.getStorage().getBPStorage(bpid);
// Make sure that there is no block pool level storage under removeDataDir.
for (int j = 0; j < bpsStorage.getNumStorageDirs(); j++) {
Storage.StorageDirectory sd = bpsStorage.getStorageDir(j);
assertFalse(sd.getRoot().getAbsolutePath().startsWith(
new File(removeDataDir).getAbsolutePath()
));
}
assertEquals(dn.getStorage().getBPStorage(bpid).getNumStorageDirs(), 1);
}
    // Bring the removed directory back. This only succeeds if all metadata
    // about this directory was removed in the previous step.
dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir);
}
/** Get the FsVolume on the given basePath */
private FsVolumeImpl getVolume(DataNode dn, File basePath)
throws IOException {
try (FsDatasetSpi.FsVolumeReferences volumes =
dn.getFSDataset().getFsVolumeReferences()) {
for (FsVolumeSpi vol : volumes) {
if (vol.getBasePath().equals(basePath.getPath())) {
return (FsVolumeImpl) vol;
}
}
}
return null;
}
/**
   * Verify that {@link DataNode#checkDiskErrorAsync()} removes all metadata in
   * the DataNode upon a volume failure, so that reconfiguring with the same
   * configuration reloads a new volume in the same directory as the failed one.
*/
@Test(timeout=60000)
public void testDirectlyReloadAfterCheckDiskError()
throws IOException, TimeoutException, InterruptedException,
ReconfigurationException {
// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
// volume failures which is currently not supported on Windows.
assumeTrue(!Path.WINDOWS);
startDFSCluster(1, 2);
createFile(new Path("/test"), 32, (short)2);
DataNode dn = cluster.getDataNodes().get(0);
final String oldDataDir = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY);
File dirToFail = new File(cluster.getDataDirectory(), "data1");
FsVolumeImpl failedVolume = getVolume(dn, dirToFail);
assertTrue("No FsVolume was found for " + dirToFail,
failedVolume != null);
long used = failedVolume.getDfsUsed();
DataNodeTestUtils.injectDataDirFailure(dirToFail);
    // Trigger a disk error check and wait for the DataNode to detect the failure.
long lastDiskErrorCheck = dn.getLastDiskErrorCheck();
dn.checkDiskErrorAsync();
while (dn.getLastDiskErrorCheck() == lastDiskErrorCheck) {
Thread.sleep(100);
}
createFile(new Path("/test1"), 32, (short)2);
assertEquals(used, failedVolume.getDfsUsed());
DataNodeTestUtils.restoreDataDirFromFailure(dirToFail);
dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir);
createFile(new Path("/test2"), 32, (short)2);
FsVolumeImpl restoredVolume = getVolume(dn, dirToFail);
assertTrue(restoredVolume != null);
assertTrue(restoredVolume != failedVolume);
// More data has been written to this volume.
assertTrue(restoredVolume.getDfsUsed() > used);
}
/** Test that a full block report is sent after hot swapping volumes */
@Test(timeout=100000)
public void testFullBlockReportAfterRemovingVolumes()
throws IOException, ReconfigurationException {
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
// Similar to TestTriggerBlockReport, set a really long value for
// dfs.heartbeat.interval, so that incremental block reports and heartbeats
// won't be sent during this test unless they're triggered
// manually.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10800000L);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1080L);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.waitActive();
final DataNode dn = cluster.getDataNodes().get(0);
DatanodeProtocolClientSideTranslatorPB spy =
DataNodeTestUtils.spyOnBposToNN(dn, cluster.getNameNode());
// Remove a data dir from datanode
File dataDirToKeep = new File(cluster.getDataDirectory(), "data1");
dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, dataDirToKeep.toString());
// We should get 1 full report
Mockito.verify(spy, timeout(60000).times(1)).blockReport(
any(DatanodeRegistration.class),
anyString(),
any(StorageBlockReport[].class),
any(BlockReportContext.class));
}
}
| 30,627 | 37.86802 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertFalse;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.times;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.*;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
/**
* Verify that incremental block reports are generated in response to
* block additions/deletions.
*/
public class TestIncrementalBlockReports {
public static final Log LOG = LogFactory.getLog(TestIncrementalBlockReports.class);
private static final short DN_COUNT = 1;
private static final long DUMMY_BLOCK_ID = 5678;
private static final long DUMMY_BLOCK_LENGTH = 1024 * 1024;
private static final long DUMMY_BLOCK_GENSTAMP = 1000;
private MiniDFSCluster cluster = null;
private DistributedFileSystem fs;
private Configuration conf;
private NameNode singletonNn;
private DataNode singletonDn;
private BPOfferService bpos; // BPOS to use for block injection.
private BPServiceActor actor; // BPSA to use for block injection.
private String storageUuid; // DatanodeStorage to use for block injection.
@Before
public void startCluster() throws IOException {
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DN_COUNT).build();
fs = cluster.getFileSystem();
singletonNn = cluster.getNameNode();
singletonDn = cluster.getDataNodes().get(0);
bpos = singletonDn.getAllBpOs().get(0);
actor = bpos.getBPServiceActors().get(0);
try (FsDatasetSpi.FsVolumeReferences volumes =
singletonDn.getFSDataset().getFsVolumeReferences()) {
storageUuid = volumes.get(0).getStorageID();
}
}
private static Block getDummyBlock() {
return new Block(DUMMY_BLOCK_ID, DUMMY_BLOCK_LENGTH, DUMMY_BLOCK_GENSTAMP);
}
/**
* Inject a fake 'received' block into the BPServiceActor state.
*/
private void injectBlockReceived() {
ReceivedDeletedBlockInfo rdbi = new ReceivedDeletedBlockInfo(
getDummyBlock(), BlockStatus.RECEIVED_BLOCK, null);
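    // The trailing 'true' argument presumably asks the actor to send the IBR
    // immediately, matching testReportBlockReceived's expectation of prompt
    // reporting.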
actor.notifyNamenodeBlock(rdbi, storageUuid, true);
}
/**
* Inject a fake 'deleted' block into the BPServiceActor state.
*/
private void injectBlockDeleted() {
ReceivedDeletedBlockInfo rdbi = new ReceivedDeletedBlockInfo(
getDummyBlock(), BlockStatus.DELETED_BLOCK, null);
actor.notifyNamenodeDeletedBlock(rdbi, storageUuid);
}
/**
* Spy on calls from the DN to the NN.
* @return spy object that can be used for Mockito verification.
*/
DatanodeProtocolClientSideTranslatorPB spyOnDnCallsToNn() {
return DataNodeTestUtils.spyOnBposToNN(singletonDn, singletonNn);
}
/**
* Ensure that an IBR is generated immediately for a block received by
* the DN.
*
* @throws InterruptedException
* @throws IOException
*/
@Test (timeout=60000)
public void testReportBlockReceived() throws InterruptedException, IOException {
try {
DatanodeProtocolClientSideTranslatorPB nnSpy = spyOnDnCallsToNn();
injectBlockReceived();
      // Sleep briefly; this is necessary since the IBR is generated
      // asynchronously.
Thread.sleep(2000);
// Ensure that the received block was reported immediately.
Mockito.verify(nnSpy, times(1)).blockReceivedAndDeleted(
any(DatanodeRegistration.class),
anyString(),
any(StorageReceivedDeletedBlocks[].class));
} finally {
cluster.shutdown();
cluster = null;
}
}
/**
* Ensure that a delayed IBR is generated for a block deleted on the DN.
*
* @throws InterruptedException
* @throws IOException
*/
@Test (timeout=60000)
public void testReportBlockDeleted() throws InterruptedException, IOException {
try {
// Trigger a block report to reset the IBR timer.
DataNodeTestUtils.triggerBlockReport(singletonDn);
// Spy on calls from the DN to the NN
DatanodeProtocolClientSideTranslatorPB nnSpy = spyOnDnCallsToNn();
injectBlockDeleted();
      // Sleep for a very short time since the IBR is generated
// asynchronously.
Thread.sleep(2000);
// Ensure that no block report was generated immediately.
// Deleted blocks are reported when the IBR timer elapses.
Mockito.verify(nnSpy, times(0)).blockReceivedAndDeleted(
any(DatanodeRegistration.class),
anyString(),
any(StorageReceivedDeletedBlocks[].class));
// Trigger a heartbeat, this also triggers an IBR.
DataNodeTestUtils.triggerHeartbeat(singletonDn);
Thread.sleep(2000);
// Ensure that the deleted block is reported.
Mockito.verify(nnSpy, times(1)).blockReceivedAndDeleted(
any(DatanodeRegistration.class),
anyString(),
any(StorageReceivedDeletedBlocks[].class));
} finally {
cluster.shutdown();
cluster = null;
}
}
/**
* Add a received block entry and then replace it. Ensure that a single
* IBR is generated and that pending receive request state is cleared.
* This test case verifies the failure in HDFS-5922.
*
* @throws InterruptedException
* @throws IOException
*/
@Test (timeout=60000)
public void testReplaceReceivedBlock() throws InterruptedException, IOException {
try {
// Spy on calls from the DN to the NN
DatanodeProtocolClientSideTranslatorPB nnSpy = spyOnDnCallsToNn();
injectBlockReceived();
injectBlockReceived(); // Overwrite the existing entry.
      // Sleep for a very short time since the IBR is generated
// asynchronously.
Thread.sleep(2000);
// Ensure that the received block is reported.
Mockito.verify(nnSpy, atLeastOnce()).blockReceivedAndDeleted(
any(DatanodeRegistration.class),
anyString(),
any(StorageReceivedDeletedBlocks[].class));
// Ensure that no more IBRs are pending.
assertFalse(actor.hasPendingIBR());
} finally {
cluster.shutdown();
cluster = null;
}
}
}
| 7,669 | 34.345622 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import static java.lang.Math.abs;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;
import org.apache.hadoop.hdfs.server.datanode.BPServiceActor.Scheduler;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
/**
* Verify the block report and heartbeat scheduling logic of BPServiceActor
 * using a few different timestamp values.
*/
public class TestBpServiceActorScheduler {
protected static final Log LOG = LogFactory.getLog(TestBpServiceActorScheduler.class);
@Rule
public Timeout timeout = new Timeout(300000);
private static final long HEARTBEAT_INTERVAL_MS = 5000; // 5 seconds
private static final long BLOCK_REPORT_INTERVAL_MS = 10000; // 10 seconds
private final Random random = new Random(System.nanoTime());
@Test
public void testInit() {
for (final long now : getTimestamps()) {
Scheduler scheduler = makeMockScheduler(now);
assertTrue(scheduler.isHeartbeatDue(now));
assertTrue(scheduler.isBlockReportDue(scheduler.monotonicNow()));
}
}
@Test
public void testScheduleBlockReportImmediate() {
for (final long now : getTimestamps()) {
Scheduler scheduler = makeMockScheduler(now);
scheduler.scheduleBlockReport(0);
assertTrue(scheduler.resetBlockReportTime);
assertThat(scheduler.nextBlockReportTime, is(now));
}
}
@Test
public void testScheduleBlockReportDelayed() {
for (final long now : getTimestamps()) {
Scheduler scheduler = makeMockScheduler(now);
final long delayMs = 10;
scheduler.scheduleBlockReport(delayMs);
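      // With a positive delay the scheduler picks a time within
      // [now, now + delayMs), presumably to jitter block reports across
      // DataNodes; the two assertions below check that window.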
assertTrue(scheduler.resetBlockReportTime);
assertTrue(scheduler.nextBlockReportTime - now >= 0);
assertTrue(scheduler.nextBlockReportTime - (now + delayMs) < 0);
}
}
/**
* If resetBlockReportTime is true then the next block report must be scheduled
   * in the range [now, now + BLOCK_REPORT_INTERVAL_MS).
*/
@Test
public void testScheduleNextBlockReport() {
for (final long now : getTimestamps()) {
Scheduler scheduler = makeMockScheduler(now);
assertTrue(scheduler.resetBlockReportTime);
scheduler.scheduleNextBlockReport();
assertTrue(scheduler.nextBlockReportTime - (now + BLOCK_REPORT_INTERVAL_MS) < 0);
}
}
/**
* If resetBlockReportTime is false then the next block report must be scheduled
   * exactly at (now + BLOCK_REPORT_INTERVAL_MS).
*/
@Test
public void testScheduleNextBlockReport2() {
for (final long now : getTimestamps()) {
Scheduler scheduler = makeMockScheduler(now);
scheduler.resetBlockReportTime = false;
scheduler.scheduleNextBlockReport();
assertThat(scheduler.nextBlockReportTime, is(now + BLOCK_REPORT_INTERVAL_MS));
}
}
/**
* Tests the case when a block report was delayed past its scheduled time.
* In that case the next block report should not be delayed for a full interval.
*/
@Test
public void testScheduleNextBlockReport3() {
for (final long now : getTimestamps()) {
Scheduler scheduler = makeMockScheduler(now);
scheduler.resetBlockReportTime = false;
// Make it look like the block report was scheduled to be sent between 1-3
// intervals ago but sent just now.
final long blockReportDelay =
BLOCK_REPORT_INTERVAL_MS + random.nextInt(2 * (int) BLOCK_REPORT_INTERVAL_MS);
final long origBlockReportTime = now - blockReportDelay;
scheduler.nextBlockReportTime = origBlockReportTime;
scheduler.scheduleNextBlockReport();
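      // The delayed report must be rescheduled within one interval of 'now'
      // and must stay on the original schedule grid (offset from the original
      // time by a whole number of intervals), as the assertions below check.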
assertTrue(scheduler.nextBlockReportTime - now < BLOCK_REPORT_INTERVAL_MS);
assertTrue(((scheduler.nextBlockReportTime - origBlockReportTime) % BLOCK_REPORT_INTERVAL_MS) == 0);
}
}
@Test
public void testScheduleHeartbeat() {
for (final long now : getTimestamps()) {
Scheduler scheduler = makeMockScheduler(now);
scheduler.scheduleNextHeartbeat();
assertFalse(scheduler.isHeartbeatDue(now));
scheduler.scheduleHeartbeat();
assertTrue(scheduler.isHeartbeatDue(now));
}
}
private Scheduler makeMockScheduler(long now) {
LOG.info("Using now = " + now);
Scheduler mockScheduler = spy(new Scheduler(HEARTBEAT_INTERVAL_MS, BLOCK_REPORT_INTERVAL_MS));
doReturn(now).when(mockScheduler).monotonicNow();
mockScheduler.nextBlockReportTime = now;
mockScheduler.nextHeartbeatTime = now;
return mockScheduler;
}
List<Long> getTimestamps() {
return Arrays.asList(
0L, Long.MIN_VALUE, Long.MAX_VALUE, // test boundaries
Long.MAX_VALUE - 1, // test integer overflow
abs(random.nextLong()), // positive random
-abs(random.nextLong())); // negative random
}
}
| 5,962 | 35.359756 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.mockito.Mockito;
import com.google.common.base.Preconditions;
/**
* Utility class for accessing package-private DataNode information during tests.
*
*/
public class DataNodeTestUtils {
private static final String DIR_FAILURE_SUFFIX = ".origin";
public static DatanodeRegistration
getDNRegistrationForBP(DataNode dn, String bpid) throws IOException {
return dn.getDNRegistrationForBP(bpid);
}
public static void setHeartbeatsDisabledForTests(DataNode dn,
boolean heartbeatsDisabledForTests) {
dn.setHeartbeatsDisabledForTests(heartbeatsDisabledForTests);
}
/**
* Set if cache reports are disabled for all DNs in a mini cluster.
*/
public static void setCacheReportsDisabledForTests(MiniDFSCluster cluster,
boolean disabled) {
for (DataNode dn : cluster.getDataNodes()) {
dn.setCacheReportsDisabledForTest(disabled);
}
}
public static void triggerDeletionReport(DataNode dn) throws IOException {
for (BPOfferService bpos : dn.getAllBpOs()) {
bpos.triggerDeletionReportForTests();
}
}
public static void triggerHeartbeat(DataNode dn) throws IOException {
for (BPOfferService bpos : dn.getAllBpOs()) {
bpos.triggerHeartbeatForTests();
}
}
public static void triggerBlockReport(DataNode dn) throws IOException {
for (BPOfferService bpos : dn.getAllBpOs()) {
bpos.triggerBlockReportForTests();
}
}
/**
* Insert a Mockito spy object between the given DataNode and
* the given NameNode. This can be used to delay or wait for
* RPC calls on the datanode->NN path.
*/
public static DatanodeProtocolClientSideTranslatorPB spyOnBposToNN(
DataNode dn, NameNode nn) {
String bpid = nn.getNamesystem().getBlockPoolId();
BPOfferService bpos = null;
for (BPOfferService thisBpos : dn.getAllBpOs()) {
if (thisBpos.getBlockPoolId().equals(bpid)) {
bpos = thisBpos;
break;
}
}
Preconditions.checkArgument(bpos != null,
"No such bpid: %s", bpid);
BPServiceActor bpsa = null;
for (BPServiceActor thisBpsa : bpos.getBPServiceActors()) {
if (thisBpsa.getNNSocketAddress().equals(nn.getServiceRpcAddress())) {
bpsa = thisBpsa;
break;
}
}
Preconditions.checkArgument(bpsa != null,
"No service actor to NN at %s", nn.getServiceRpcAddress());
DatanodeProtocolClientSideTranslatorPB origNN = bpsa.getNameNodeProxy();
DatanodeProtocolClientSideTranslatorPB spy = Mockito.spy(origNN);
bpsa.setNameNode(spy);
return spy;
}
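  // Example usage (as in TestFsDatasetCache in this package): install the spy,
  // run the action under test, then verify the DN->NN RPCs with Mockito, e.g.
  //
  //   DatanodeProtocolClientSideTranslatorPB spy =
  //       DataNodeTestUtils.spyOnBposToNN(dn, cluster.getNameNode());
  //   Mockito.verify(spy, timeout(60000).times(1)).blockReport(
  //       any(DatanodeRegistration.class), anyString(),
  //       any(StorageBlockReport[].class), any(BlockReportContext.class));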
public static InterDatanodeProtocol createInterDatanodeProtocolProxy(
DataNode dn, DatanodeID datanodeid, final Configuration conf,
boolean connectToDnViaHostname) throws IOException {
if (connectToDnViaHostname != dn.getDnConf().connectToDnViaHostname) {
throw new AssertionError("Unexpected DN hostname configuration");
}
return DataNode.createInterDataNodeProtocolProxy(datanodeid, conf,
dn.getDnConf().socketTimeout, dn.getDnConf().connectToDnViaHostname);
}
/**
   * This method is used by tests that manipulate blocks directly, for example
   * adding and deleting them. The most common usage is when the DataNode's
   * storage is simulated.
*
* @return the fsdataset that stores the blocks
*/
public static FsDatasetSpi<?> getFSDataset(DataNode dn) {
return dn.getFSDataset();
}
public static File getFile(DataNode dn, String bpid, long bid) {
return FsDatasetTestUtil.getFile(dn.getFSDataset(), bpid, bid);
}
public static File getBlockFile(DataNode dn, String bpid, Block b
) throws IOException {
return FsDatasetTestUtil.getBlockFile(dn.getFSDataset(), bpid, b);
}
public static File getMetaFile(DataNode dn, String bpid, Block b)
throws IOException {
return FsDatasetTestUtil.getMetaFile(dn.getFSDataset(), bpid, b);
}
public static boolean unlinkBlock(DataNode dn, ExtendedBlock bk, int numLinks
) throws IOException {
return FsDatasetTestUtil.unlinkBlock(dn.getFSDataset(), bk, numLinks);
}
public static long getPendingAsyncDeletions(DataNode dn) {
return FsDatasetTestUtil.getPendingAsyncDeletions(dn.getFSDataset());
}
/**
* Fetch a copy of ReplicaInfo from a datanode by block id
* @param dn datanode to retrieve a replicainfo object from
* @param bpid Block pool Id
* @param blkId id of the replica's block
   * @return a copy of the ReplicaInfo object (see {@link FSDataset#fetchReplicaInfo})
*/
public static ReplicaInfo fetchReplicaInfo(final DataNode dn,
final String bpid, final long blkId) {
return FsDatasetTestUtil.fetchReplicaInfo(dn.getFSDataset(), bpid, blkId);
}
/**
   * Injects disk failures into data dirs by replacing these data dirs with
   * regular files.
*
* @param dirs data directories.
* @throws IOException on I/O error.
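   *
   * <p>Typical usage in a test (for example TestDataNodeHotSwapVolumes in
   * this source tree):
   * <pre>
   *   DataNodeTestUtils.injectDataDirFailure(dirToFail);
   *   // ... let the DataNode detect the failure ...
   *   DataNodeTestUtils.restoreDataDirFromFailure(dirToFail);
   * </pre>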
*/
public static void injectDataDirFailure(File... dirs) throws IOException {
for (File dir : dirs) {
File renamedTo = new File(dir.getPath() + DIR_FAILURE_SUFFIX);
if (renamedTo.exists()) {
throw new IOException(String.format(
"Can not inject failure to dir: %s because %s exists.",
dir, renamedTo));
}
if (!dir.renameTo(renamedTo)) {
throw new IOException(String.format("Failed to rename %s to %s.",
dir, renamedTo));
}
if (!dir.createNewFile()) {
throw new IOException(String.format(
"Failed to create file %s to inject disk failure.", dir));
}
}
}
/**
* Restore the injected data dir failures.
*
   * @see #injectDataDirFailure
* @param dirs data directories.
* @throws IOException
*/
public static void restoreDataDirFromFailure(File... dirs)
throws IOException {
for (File dir : dirs) {
File renamedDir = new File(dir.getPath() + DIR_FAILURE_SUFFIX);
if (renamedDir.exists()) {
if (dir.exists()) {
if (!dir.isFile()) {
throw new IOException(
"Injected failure data dir is supposed to be file: " + dir);
}
if (!dir.delete()) {
throw new IOException(
"Failed to delete injected failure data dir: " + dir);
}
}
if (!renamedDir.renameTo(dir)) {
throw new IOException(String.format(
"Failed to recover injected failure data dir %s to %s.",
renamedDir, dir));
}
}
}
}
public static void runDirectoryScanner(DataNode dn) throws IOException {
DirectoryScanner directoryScanner = dn.getDirectoryScanner();
if (directoryScanner != null) {
      directoryScanner.reconcile();
}
}
}
| 8,340 | 33.754167 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Mockito.doReturn;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.HdfsBlockLocation;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.BlockReaderTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetCache;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetCache.PageRounder;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.BlockIdCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.MetricsAsserts;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import com.google.common.base.Supplier;
import com.google.common.primitives.Ints;
public class TestFsDatasetCache {
private static final Log LOG = LogFactory.getLog(TestFsDatasetCache.class);
// Most Linux installs allow a default of 64KB locked memory
static final long CACHE_CAPACITY = 64 * 1024;
  // mlock always locks entire pages, so to avoid dealing with rounding we use
  // the OS page size as the block size.
private static final long PAGE_SIZE =
NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();
private static final long BLOCK_SIZE = PAGE_SIZE;
private static Configuration conf;
private static MiniDFSCluster cluster = null;
private static FileSystem fs;
private static NameNode nn;
private static FSImage fsImage;
private static DataNode dn;
private static FsDatasetSpi<?> fsd;
private static DatanodeProtocolClientSideTranslatorPB spyNN;
private static final PageRounder rounder = new PageRounder();
private static CacheManipulator prevCacheManipulator;
static {
LogManager.getLogger(FsDatasetCache.class).setLevel(Level.DEBUG);
}
@Before
public void setUp() throws Exception {
conf = new HdfsConfiguration();
conf.setLong(
DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 500);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
CACHE_CAPACITY);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
nn = cluster.getNameNode();
fsImage = nn.getFSImage();
dn = cluster.getDataNodes().get(0);
fsd = dn.getFSDataset();
spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn);
}
@After
public void tearDown() throws Exception {
// Verify that each test uncached whatever it cached. This cleanup is
// required so that file descriptors are not leaked across tests.
DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
// Restore the original CacheManipulator
NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
}
private static void setHeartbeatResponse(DatanodeCommand[] cmds)
throws IOException {
NNHAStatusHeartbeat ha = new NNHAStatusHeartbeat(HAServiceState.ACTIVE,
fsImage.getLastAppliedOrWrittenTxId());
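    // The '| 1L' below forces the randomly generated id to be non-zero; the
    // exact role of this HeartbeatResponse field is not shown in this file.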
HeartbeatResponse response =
new HeartbeatResponse(cmds, ha, null,
ThreadLocalRandom.current().nextLong() | 1L);
doReturn(response).when(spyNN).sendHeartbeat(
(DatanodeRegistration) any(),
(StorageReport[]) any(), anyLong(), anyLong(),
anyInt(), anyInt(), anyInt(), (VolumeFailureSummary) any(),
anyBoolean());
}
private static DatanodeCommand[] cacheBlock(HdfsBlockLocation loc) {
return cacheBlocks(new HdfsBlockLocation[] {loc});
}
private static DatanodeCommand[] cacheBlocks(HdfsBlockLocation[] locs) {
return new DatanodeCommand[] {
getResponse(locs, DatanodeProtocol.DNA_CACHE)
};
}
private static DatanodeCommand[] uncacheBlock(HdfsBlockLocation loc) {
return uncacheBlocks(new HdfsBlockLocation[] {loc});
}
private static DatanodeCommand[] uncacheBlocks(HdfsBlockLocation[] locs) {
return new DatanodeCommand[] {
getResponse(locs, DatanodeProtocol.DNA_UNCACHE)
};
}
/**
* Creates a cache or uncache DatanodeCommand from an array of locations
*/
private static DatanodeCommand getResponse(HdfsBlockLocation[] locs,
int action) {
String bpid = locs[0].getLocatedBlock().getBlock().getBlockPoolId();
long[] blocks = new long[locs.length];
for (int i=0; i<locs.length; i++) {
blocks[i] = locs[i].getLocatedBlock().getBlock().getBlockId();
}
return new BlockIdCommand(action, bpid, blocks);
}
private static long[] getBlockSizes(HdfsBlockLocation[] locs)
throws Exception {
long[] sizes = new long[locs.length];
for (int i=0; i<locs.length; i++) {
HdfsBlockLocation loc = locs[i];
String bpid = loc.getLocatedBlock().getBlock().getBlockPoolId();
Block block = loc.getLocatedBlock().getBlock().getLocalBlock();
ExtendedBlock extBlock = new ExtendedBlock(bpid, block);
FileInputStream blockInputStream = null;
FileChannel blockChannel = null;
try {
blockInputStream =
(FileInputStream)fsd.getBlockInputStream(extBlock, 0);
blockChannel = blockInputStream.getChannel();
sizes[i] = blockChannel.size();
} finally {
IOUtils.cleanup(LOG, blockChannel, blockInputStream);
}
}
return sizes;
}
private void testCacheAndUncacheBlock() throws Exception {
LOG.info("beginning testCacheAndUncacheBlock");
final int NUM_BLOCKS = 5;
DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
assertEquals(0, fsd.getNumBlocksCached());
// Write a test file
final Path testFile = new Path("/testCacheBlock");
final long testFileLen = BLOCK_SIZE*NUM_BLOCKS;
    DFSTestUtil.createFile(fs, testFile, testFileLen, (short)1, 0xABBAL);
// Get the details of the written file
HdfsBlockLocation[] locs =
(HdfsBlockLocation[])fs.getFileBlockLocations(testFile, 0, testFileLen);
assertEquals("Unexpected number of blocks", NUM_BLOCKS, locs.length);
final long[] blockSizes = getBlockSizes(locs);
// Check initial state
final long cacheCapacity = fsd.getCacheCapacity();
long cacheUsed = fsd.getCacheUsed();
long current = 0;
assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
assertEquals("Unexpected amount of cache used", current, cacheUsed);
MetricsRecordBuilder dnMetrics;
long numCacheCommands = 0;
long numUncacheCommands = 0;
// Cache each block in succession, checking each time
for (int i=0; i<NUM_BLOCKS; i++) {
setHeartbeatResponse(cacheBlock(locs[i]));
current = DFSTestUtil.verifyExpectedCacheUsage(
current + blockSizes[i], i + 1, fsd);
dnMetrics = getMetrics(dn.getMetrics().name());
long cmds = MetricsAsserts.getLongCounter("BlocksCached", dnMetrics);
assertTrue("Expected more cache requests from the NN ("
+ cmds + " <= " + numCacheCommands + ")",
cmds > numCacheCommands);
numCacheCommands = cmds;
}
// Uncache each block in succession, again checking each time
for (int i=0; i<NUM_BLOCKS; i++) {
setHeartbeatResponse(uncacheBlock(locs[i]));
      current = DFSTestUtil.verifyExpectedCacheUsage(
          current - blockSizes[i], NUM_BLOCKS - 1 - i, fsd);
dnMetrics = getMetrics(dn.getMetrics().name());
long cmds = MetricsAsserts.getLongCounter("BlocksUncached", dnMetrics);
assertTrue("Expected more uncache requests from the NN",
cmds > numUncacheCommands);
numUncacheCommands = cmds;
}
LOG.info("finishing testCacheAndUncacheBlock");
}
@Test(timeout=600000)
public void testCacheAndUncacheBlockSimple() throws Exception {
testCacheAndUncacheBlock();
}
/**
* Run testCacheAndUncacheBlock with some failures injected into the mlock
* call. This tests the ability of the NameNode to resend commands.
*/
@Test(timeout=600000)
public void testCacheAndUncacheBlockWithRetries() throws Exception {
// We don't have to save the previous cacheManipulator
// because it will be reinstalled by the @After function.
NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
private final Set<String> seenIdentifiers = new HashSet<String>();
@Override
public void mlock(String identifier,
ByteBuffer mmap, long length) throws IOException {
if (seenIdentifiers.contains(identifier)) {
// mlock succeeds the second time.
LOG.info("mlocking " + identifier);
return;
}
seenIdentifiers.add(identifier);
throw new IOException("injecting IOException during mlock of " +
identifier);
}
});
testCacheAndUncacheBlock();
}
@Test(timeout=600000)
public void testFilesExceedMaxLockedMemory() throws Exception {
LOG.info("beginning testFilesExceedMaxLockedMemory");
// Create some test files that will exceed total cache capacity
final int numFiles = 5;
final long fileSize = CACHE_CAPACITY / (numFiles-1);
final Path[] testFiles = new Path[numFiles];
final HdfsBlockLocation[][] fileLocs = new HdfsBlockLocation[numFiles][];
final long[] fileSizes = new long[numFiles];
for (int i=0; i<numFiles; i++) {
testFiles[i] = new Path("/testFilesExceedMaxLockedMemory-" + i);
      DFSTestUtil.createFile(fs, testFiles[i], fileSize, (short)1, 0xDFAL);
fileLocs[i] = (HdfsBlockLocation[])fs.getFileBlockLocations(
testFiles[i], 0, fileSize);
// Get the file size (sum of blocks)
long[] sizes = getBlockSizes(fileLocs[i]);
for (int j=0; j<sizes.length; j++) {
fileSizes[i] += sizes[j];
}
}
// Cache the first n-1 files
long total = 0;
DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
for (int i=0; i<numFiles-1; i++) {
setHeartbeatResponse(cacheBlocks(fileLocs[i]));
total = DFSTestUtil.verifyExpectedCacheUsage(
rounder.roundUp(total + fileSizes[i]), 4 * (i + 1), fsd);
}
// nth file should hit a capacity exception
final LogVerificationAppender appender = new LogVerificationAppender();
final Logger logger = Logger.getRootLogger();
logger.addAppender(appender);
setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
int lines = appender.countLinesWithMessage(
"more bytes in the cache: " +
DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
return lines > 0;
}
}, 500, 30000);
// Also check the metrics for the failure
assertTrue("Expected more than 0 failed cache attempts",
fsd.getNumBlocksFailedToCache() > 0);
// Uncache the n-1 files
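    // Each cached file occupies 4 blocks (see the 4 * (i + 1) check above),
    // so the 4 cached files leave 16 blocks in the cache.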
int curCachedBlocks = 16;
for (int i=0; i<numFiles-1; i++) {
setHeartbeatResponse(uncacheBlocks(fileLocs[i]));
long uncachedBytes = rounder.roundUp(fileSizes[i]);
total -= uncachedBytes;
curCachedBlocks -= uncachedBytes / BLOCK_SIZE;
DFSTestUtil.verifyExpectedCacheUsage(total, curCachedBlocks, fsd);
}
LOG.info("finishing testFilesExceedMaxLockedMemory");
}
@Test(timeout=600000)
public void testUncachingBlocksBeforeCachingFinishes() throws Exception {
LOG.info("beginning testUncachingBlocksBeforeCachingFinishes");
final int NUM_BLOCKS = 5;
DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
// Write a test file
final Path testFile = new Path("/testCacheBlock");
final long testFileLen = BLOCK_SIZE*NUM_BLOCKS;
    DFSTestUtil.createFile(fs, testFile, testFileLen, (short)1, 0xABBAL);
// Get the details of the written file
HdfsBlockLocation[] locs =
(HdfsBlockLocation[])fs.getFileBlockLocations(testFile, 0, testFileLen);
assertEquals("Unexpected number of blocks", NUM_BLOCKS, locs.length);
final long[] blockSizes = getBlockSizes(locs);
// Check initial state
final long cacheCapacity = fsd.getCacheCapacity();
long cacheUsed = fsd.getCacheUsed();
long current = 0;
assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
assertEquals("Unexpected amount of cache used", current, cacheUsed);
NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
@Override
public void mlock(String identifier,
ByteBuffer mmap, long length) throws IOException {
LOG.info("An mlock operation is starting on " + identifier);
try {
Thread.sleep(3000);
} catch (InterruptedException e) {
Assert.fail();
}
}
});
    // Start caching each block in succession. The usedBytes amount
    // should increase, even though caching doesn't complete on any of them.
for (int i=0; i<NUM_BLOCKS; i++) {
setHeartbeatResponse(cacheBlock(locs[i]));
current = DFSTestUtil.verifyExpectedCacheUsage(
current + blockSizes[i], i + 1, fsd);
}
setHeartbeatResponse(new DatanodeCommand[] {
getResponse(locs, DatanodeProtocol.DNA_UNCACHE)
});
// wait until all caching jobs are finished cancelling.
current = DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
LOG.info("finishing testUncachingBlocksBeforeCachingFinishes");
}
@Test(timeout=60000)
public void testUncacheUnknownBlock() throws Exception {
// Create a file
Path fileName = new Path("/testUncacheUnknownBlock");
int fileLen = 4096;
DFSTestUtil.createFile(fs, fileName, fileLen, (short)1, 0xFDFD);
HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations(
fileName, 0, fileLen);
// Try to uncache it without caching it first
setHeartbeatResponse(uncacheBlocks(locs));
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return fsd.getNumBlocksFailedToUncache() > 0;
}
}, 100, 10000);
}
@Test(timeout=60000)
public void testPageRounder() throws Exception {
// Write a small file
Path fileName = new Path("/testPageRounder");
final int smallBlocks = 512; // This should be smaller than the page size
assertTrue("Page size should be greater than smallBlocks!",
PAGE_SIZE > smallBlocks);
final int numBlocks = 5;
final int fileLen = smallBlocks * numBlocks;
FSDataOutputStream out =
fs.create(fileName, false, 4096, (short)1, smallBlocks);
out.write(new byte[fileLen]);
out.close();
HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations(
fileName, 0, fileLen);
// Cache the file and check the sizes match the page size
setHeartbeatResponse(cacheBlocks(locs));
DFSTestUtil.verifyExpectedCacheUsage(PAGE_SIZE * numBlocks, numBlocks, fsd);
// Uncache and check that it decrements by the page size too
setHeartbeatResponse(uncacheBlocks(locs));
DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
}
@Test(timeout=60000)
public void testUncacheQuiesces() throws Exception {
// Create a file
Path fileName = new Path("/testUncacheQuiesces");
int fileLen = 4096;
DFSTestUtil.createFile(fs, fileName, fileLen, (short)1, 0xFDFD);
// Cache it
DistributedFileSystem dfs = cluster.getFileSystem();
dfs.addCachePool(new CachePoolInfo("pool"));
dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
.setPool("pool").setPath(fileName).setReplication((short)3).build());
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
long blocksCached =
MetricsAsserts.getLongCounter("BlocksCached", dnMetrics);
return blocksCached > 0;
}
}, 1000, 30000);
// Uncache it
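    // The directive created above is the first one in this cluster, so it
    // was assigned id 1.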
dfs.removeCacheDirective(1);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
long blocksUncached =
MetricsAsserts.getLongCounter("BlocksUncached", dnMetrics);
return blocksUncached > 0;
}
}, 1000, 30000);
// Make sure that no additional messages were sent
Thread.sleep(10000);
MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
MetricsAsserts.assertCounter("BlocksCached", 1l, dnMetrics);
MetricsAsserts.assertCounter("BlocksUncached", 1l, dnMetrics);
}
@Test(timeout=60000)
public void testReCacheAfterUncache() throws Exception {
final int TOTAL_BLOCKS_PER_CACHE =
Ints.checkedCast(CACHE_CAPACITY / BLOCK_SIZE);
BlockReaderTestUtil.enableHdfsCachingTracing();
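    // Sanity check: the cache capacity must be an exact multiple of the
    // block size for TOTAL_BLOCKS_PER_CACHE to be meaningful.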
Assert.assertEquals(0, CACHE_CAPACITY % BLOCK_SIZE);
// Create a small file
final Path SMALL_FILE = new Path("/smallFile");
DFSTestUtil.createFile(fs, SMALL_FILE,
BLOCK_SIZE, (short)1, 0xcafe);
// Create a file that will take up the whole cache
final Path BIG_FILE = new Path("/bigFile");
DFSTestUtil.createFile(fs, BIG_FILE,
TOTAL_BLOCKS_PER_CACHE * BLOCK_SIZE, (short)1, 0xbeef);
final DistributedFileSystem dfs = cluster.getFileSystem();
dfs.addCachePool(new CachePoolInfo("pool"));
final long bigCacheDirectiveId =
dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
.setPool("pool").setPath(BIG_FILE).setReplication((short)1).build());
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
long blocksCached =
MetricsAsserts.getLongCounter("BlocksCached", dnMetrics);
if (blocksCached != TOTAL_BLOCKS_PER_CACHE) {
LOG.info("waiting for " + TOTAL_BLOCKS_PER_CACHE + " to " +
"be cached. Right now only " + blocksCached + " blocks are cached.");
return false;
}
LOG.info(TOTAL_BLOCKS_PER_CACHE + " blocks are now cached.");
return true;
}
}, 1000, 30000);
// Try to cache a smaller file. It should fail.
final long shortCacheDirectiveId =
dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
.setPool("pool").setPath(SMALL_FILE).setReplication((short)1).build());
Thread.sleep(10000);
MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
Assert.assertEquals(TOTAL_BLOCKS_PER_CACHE,
MetricsAsserts.getLongCounter("BlocksCached", dnMetrics));
// Uncache the big file and verify that the small file can now be
// cached (regression test for HDFS-6107)
dfs.removeCacheDirective(bigCacheDirectiveId);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
RemoteIterator<CacheDirectiveEntry> iter;
try {
iter = dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().build());
CacheDirectiveEntry entry;
do {
entry = iter.next();
} while (entry.getInfo().getId() != shortCacheDirectiveId);
if (entry.getStats().getFilesCached() != 1) {
LOG.info("waiting for directive " + shortCacheDirectiveId +
" to be cached. stats = " + entry.getStats());
return false;
}
LOG.info("directive " + shortCacheDirectiveId + " has been cached.");
} catch (IOException e) {
Assert.fail("unexpected exception" + e.toString());
}
return true;
}
}, 1000, 30000);
dfs.removeCacheDirective(shortCacheDirectiveId);
}
}
| 23,443 | 38.601351 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBrVariations.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.*;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.UUID;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.*;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* This test verifies that incremental block reports from a single DataNode are
* correctly handled by NN. Tests the following variations:
* #1 - Incremental BRs from all storages combined in a single call.
* #2 - Incremental BRs from separate storages sent in separate calls.
* #3 - Incremental BR from an unknown storage should be rejected.
*
* We also verify that the DataNode is not splitting the reports (it may do so
* in the future).
*/
public class TestIncrementalBrVariations {
public static final Log LOG = LogFactory.getLog(TestIncrementalBrVariations.class);
private static final short NUM_DATANODES = 1;
static final int BLOCK_SIZE = 1024;
static final int NUM_BLOCKS = 10;
private static final long seed = 0xFACEFEEDL;
private static final String NN_METRICS = "NameNodeActivity";
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private DFSClient client;
private static Configuration conf;
private String poolId;
private DataNode dn0; // DataNode at index0 in the MiniDFSCluster
private DatanodeRegistration dn0Reg; // DataNodeRegistration for dn0
static {
GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL);
GenericTestUtils.setLogLevel(BlockManager.blockLog, Level.ALL);
GenericTestUtils.setLogLevel(NameNode.blockStateChangeLog, Level.ALL);
GenericTestUtils
.setLogLevel(LogFactory.getLog(FSNamesystem.class), Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(TestIncrementalBrVariations.LOG, Level.ALL);
}
@Before
public void startUpCluster() throws IOException {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
fs = cluster.getFileSystem();
client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()),
cluster.getConfiguration(0));
dn0 = cluster.getDataNodes().get(0);
poolId = cluster.getNamesystem().getBlockPoolId();
dn0Reg = dn0.getDNRegistrationForBP(poolId);
}
@After
public void shutDownCluster() throws IOException {
client.close();
fs.close();
cluster.shutdownDataNodes();
cluster.shutdown();
}
/**
* Incremental BRs from all storages combined in a single message.
*/
@Test
public void testCombinedIncrementalBlockReport() throws IOException {
verifyIncrementalBlockReports(false);
}
/**
* One incremental BR per storage.
*/
@Test
public void testSplitIncrementalBlockReport() throws IOException {
verifyIncrementalBlockReports(true);
}
private LocatedBlocks createFileGetBlocks(String filenamePrefix) throws IOException {
Path filePath = new Path("/" + filenamePrefix + ".dat");
// Write out a file with a few blocks, get block locations.
DFSTestUtil.createFile(fs, filePath, BLOCK_SIZE, BLOCK_SIZE * NUM_BLOCKS,
BLOCK_SIZE, NUM_DATANODES, seed);
// Get the block list for the file with the block locations.
LocatedBlocks blocks = client.getLocatedBlocks(
filePath.toString(), 0, BLOCK_SIZE * NUM_BLOCKS);
assertThat(cluster.getNamesystem().getUnderReplicatedBlocks(), is(0L));
return blocks;
}
public void verifyIncrementalBlockReports(boolean splitReports) throws IOException {
// Get the block list for the file with the block locations.
LocatedBlocks blocks = createFileGetBlocks(GenericTestUtils.getMethodName());
try (FsDatasetSpi.FsVolumeReferences volumes
= dn0.getFSDataset().getFsVolumeReferences()) {
// We will send 'fake' incremental block reports to the NN that look
// like they originated from DN 0.
      StorageReceivedDeletedBlocks[] reports =
          new StorageReceivedDeletedBlocks[volumes.size()];
// Lie to the NN that one block on each storage has been deleted.
for (int i = 0; i < reports.length; ++i) {
FsVolumeSpi volume = volumes.get(i);
boolean foundBlockOnStorage = false;
        ReceivedDeletedBlockInfo[] rdbi = new ReceivedDeletedBlockInfo[1];
// Find the first block on this storage and mark it as deleted for the
// report.
for (LocatedBlock block : blocks.getLocatedBlocks()) {
if (block.getStorageIDs()[0].equals(volume.getStorageID())) {
rdbi[0] =
new ReceivedDeletedBlockInfo(block.getBlock().getLocalBlock(),
ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null);
foundBlockOnStorage = true;
break;
}
}
assertTrue(foundBlockOnStorage);
reports[i] =
new StorageReceivedDeletedBlocks(volume.getStorageID(), rdbi);
if (splitReports) {
// If we are splitting reports then send the report for this storage now.
          StorageReceivedDeletedBlocks[] singletonReport = { reports[i] };
cluster.getNameNodeRpc().blockReceivedAndDeleted(
dn0Reg, poolId, singletonReport);
}
}
if (!splitReports) {
// Send a combined report.
cluster.getNameNodeRpc()
.blockReceivedAndDeleted(dn0Reg, poolId, reports);
}
// Make sure that the deleted block from each storage was picked up
// by the NameNode.
assertThat(cluster.getNamesystem().getMissingBlocksCount(),
is((long) reports.length));
}
}
/**
* Verify that the DataNode sends a single incremental block report for all
* storages.
* @throws IOException
* @throws InterruptedException
*/
@Test (timeout=60000)
public void testDataNodeDoesNotSplitReports()
throws IOException, InterruptedException {
LocatedBlocks blocks = createFileGetBlocks(GenericTestUtils.getMethodName());
assertThat(cluster.getDataNodes().size(), is(1));
// Remove all blocks from the DataNode.
for (LocatedBlock block : blocks.getLocatedBlocks()) {
dn0.notifyNamenodeDeletedBlock(
block.getBlock(), block.getStorageIDs()[0]);
}
LOG.info("Triggering report after deleting blocks");
long ops = getLongCounter("BlockReceivedAndDeletedOps", getMetrics(NN_METRICS));
// Trigger a report to the NameNode and give it a few seconds.
DataNodeTestUtils.triggerBlockReport(dn0);
Thread.sleep(5000);
// Ensure that NameNodeRpcServer.blockReceivedAndDeletes is invoked
// exactly once after we triggered the report.
assertCounter("BlockReceivedAndDeletedOps", ops+1, getMetrics(NN_METRICS));
}
private static Block getDummyBlock() {
return new Block(10000000L, 100L, 1048576L);
}
private static StorageReceivedDeletedBlocks[] makeReportForReceivedBlock(
Block block, DatanodeStorage storage) {
ReceivedDeletedBlockInfo[] receivedBlocks = new ReceivedDeletedBlockInfo[1];
receivedBlocks[0] = new ReceivedDeletedBlockInfo(block, BlockStatus.RECEIVED_BLOCK, null);
StorageReceivedDeletedBlocks[] reports = new StorageReceivedDeletedBlocks[1];
reports[0] = new StorageReceivedDeletedBlocks(storage, receivedBlocks);
return reports;
}
/**
* Verify that the NameNode can learn about new storages from incremental
* block reports.
* This tests the fix for the error condition seen in HDFS-6904.
*
* @throws IOException
* @throws InterruptedException
*/
@Test (timeout=60000)
public void testNnLearnsNewStorages()
throws IOException, InterruptedException {
// Generate a report for a fake block on a fake storage.
final String newStorageUuid = UUID.randomUUID().toString();
final DatanodeStorage newStorage = new DatanodeStorage(newStorageUuid);
StorageReceivedDeletedBlocks[] reports = makeReportForReceivedBlock(
getDummyBlock(), newStorage);
// Send the report to the NN.
cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, reports);
// Make sure that the NN has learned of the new storage.
DatanodeStorageInfo storageInfo = cluster.getNameNode()
.getNamesystem()
.getBlockManager()
.getDatanodeManager()
.getDatanode(dn0.getDatanodeId())
.getStorageInfo(newStorageUuid);
assertNotNull(storageInfo);
}
}
| 10,721 | 38.711111 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFSDataSetSink.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
import org.junit.Test;
import java.util.Set;
import java.util.TreeSet;
import static org.junit.Assert.assertEquals;
public class TestDataNodeFSDataSetSink {
private static final MetricsSystemImpl ms = new
MetricsSystemImpl("TestFSDataSet");
class FSDataSetSinkTest implements MetricsSink {
private Set<String> nameMap;
private int count;
/**
* add a metrics record in the sink
*
* @param record the record to add
*/
@Override
public void putMetrics(MetricsRecord record) {
// let us do this only once, otherwise
// our count could go out of sync.
if (count == 0) {
for (AbstractMetric m : record.metrics()) {
if (nameMap.contains(m.name())) {
count++;
}
}
for (MetricsTag t : record.tags()) {
if (nameMap.contains(t.name())) {
count++;
}
}
}
}
/**
* Flush any buffered metrics
*/
@Override
public void flush() {
}
/**
* Initialize the plugin
*
* @param conf the configuration object for the plugin
*/
@Override
public void init(SubsetConfiguration conf) {
nameMap = new TreeSet<>();
nameMap.add("DfsUsed");
nameMap.add("Capacity");
nameMap.add("Remaining");
nameMap.add("StorageInfo");
nameMap.add("NumFailedVolumes");
nameMap.add("LastVolumeFailureDate");
nameMap.add("EstimatedCapacityLostTotal");
nameMap.add("CacheUsed");
nameMap.add("CacheCapacity");
nameMap.add("NumBlocksCached");
nameMap.add("NumBlocksFailedToCache");
nameMap.add("NumBlocksFailedToUnCache");
nameMap.add("Context");
nameMap.add("Hostname");
}
public int getMapCount() {
return nameMap.size();
}
public int getFoundKeyCount() {
return count;
}
}
  /**
   * This test creates a Source and then calls into the Sink that we
   * have registered, that is, into FSDataSetSinkTest.
   */
  @Test
public void testFSDataSetMetrics() throws InterruptedException {
Configuration conf = new HdfsConfiguration();
String bpid = "FSDatSetSink-Test";
SimulatedFSDataset fsdataset = new SimulatedFSDataset(null, conf);
fsdataset.addBlockPool(bpid, conf);
FSDataSetSinkTest sink = new FSDataSetSinkTest();
sink.init(null);
ms.init("Test");
ms.start();
ms.register("FSDataSetSource", "FSDataSetSource", fsdataset);
ms.register("FSDataSetSink", "FSDataSetSink", sink);
ms.startMetricsMBeans();
ms.publishMetricsNow();
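    // Give the metrics system time to deliver the published record to the
    // sink before shutting down.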
Thread.sleep(4000);
ms.stopMetricsMBeans();
ms.shutdown();
    // make sure we got all the expected metrics in the callback
assertEquals(sink.getMapCount(), sink.getFoundKeyCount());
}
}
| 4,056 | 28.613139 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesBlockReportPerStorage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.util.Time;
/**
 * Runs all tests in BlockReportTestBase, sending one block report per storage.
* This is the default DataNode behavior post HDFS-2832.
*/
public class TestNNHandlesBlockReportPerStorage extends BlockReportTestBase {
@Override
protected void sendBlockReports(DatanodeRegistration dnR, String poolId,
StorageBlockReport[] reports) throws IOException {
int i = 0;
for (StorageBlockReport report : reports) {
LOG.info("Sending block report for storage " + report.getStorage().getStorageID());
StorageBlockReport[] singletonReport = { report };
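      // Each storage's report goes out in its own RPC; the context records
      // the total number of RPCs, this report's index, a nanoTime-based
      // report id, and no lease.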
cluster.getNameNodeRpc().blockReport(dnR, poolId, singletonReport,
new BlockReportContext(reports.length, i, System.nanoTime(), 0L));
i++;
}
}
}
| 1,868 | 37.9375 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.hamcrest.CoreMatchers.*;
import static org.junit.Assert.*;
import static org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State.*;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Collections;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.google.common.collect.Iterables;
/**
* Test proper {@link BlockManager} replication counting for {@link DatanodeStorage}s
* with {@link DatanodeStorage.State#READ_ONLY_SHARED READ_ONLY} state.
*
* Uses {@link SimulatedFSDataset} to inject read-only replicas into a DataNode.
*/
public class TestReadOnlySharedStorage {
public static final Log LOG = LogFactory.getLog(TestReadOnlySharedStorage.class);
private static final short NUM_DATANODES = 3;
private static final int RO_NODE_INDEX = 0;
private static final int BLOCK_SIZE = 1024;
private static final long seed = 0x1BADF00DL;
private static final Path PATH = new Path("/" + TestReadOnlySharedStorage.class.getName() + ".dat");
private static final int RETRIES = 10;
private Configuration conf;
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private DFSClient client;
private BlockManager blockManager;
private DatanodeManager datanodeManager;
private DatanodeInfo normalDataNode;
private DatanodeInfo readOnlyDataNode;
private Block block;
private BlockInfo storedBlock;
private ExtendedBlock extendedBlock;
/**
* Setup a {@link MiniDFSCluster}.
* Create a block with both {@link State#NORMAL} and {@link State#READ_ONLY_SHARED} replicas.
*/
@Before
public void setup() throws IOException, InterruptedException {
conf = new HdfsConfiguration();
SimulatedFSDataset.setFactory(conf);
Configuration[] overlays = new Configuration[NUM_DATANODES];
for (int i = 0; i < overlays.length; i++) {
overlays[i] = new Configuration();
      if (i == RO_NODE_INDEX) {
        overlays[i].setEnum(SimulatedFSDataset.CONFIG_PROPERTY_STATE,
            READ_ONLY_SHARED);
      }
}
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATANODES)
.dataNodeConfOverlays(overlays)
.build();
fs = cluster.getFileSystem();
blockManager = cluster.getNameNode().getNamesystem().getBlockManager();
datanodeManager = blockManager.getDatanodeManager();
client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()),
cluster.getConfiguration(0));
for (int i = 0; i < NUM_DATANODES; i++) {
DataNode dataNode = cluster.getDataNodes().get(i);
validateStorageState(
BlockManagerTestUtil.getStorageReportsForDatanode(
datanodeManager.getDatanode(dataNode.getDatanodeId())),
i == RO_NODE_INDEX
? READ_ONLY_SHARED
: NORMAL);
}
// Create a 1 block file
DFSTestUtil.createFile(fs, PATH, BLOCK_SIZE, BLOCK_SIZE,
BLOCK_SIZE, (short) 1, seed);
LocatedBlock locatedBlock = getLocatedBlock();
extendedBlock = locatedBlock.getBlock();
block = extendedBlock.getLocalBlock();
storedBlock = blockManager.getStoredBlock(block);
assertThat(locatedBlock.getLocations().length, is(1));
normalDataNode = locatedBlock.getLocations()[0];
readOnlyDataNode = datanodeManager.getDatanode(cluster.getDataNodes().get(RO_NODE_INDEX).getDatanodeId());
assertThat(normalDataNode, is(not(readOnlyDataNode)));
validateNumberReplicas(1);
// Inject the block into the datanode with READ_ONLY_SHARED storage
cluster.injectBlocks(0, RO_NODE_INDEX, Collections.singleton(block));
// There should now be 2 *locations* for the block
// Must wait until the NameNode has processed the block report for the injected blocks
waitForLocations(2);
}
@After
public void tearDown() throws IOException {
fs.delete(PATH, false);
if (cluster != null) {
fs.close();
cluster.shutdown();
cluster = null;
}
}
private void waitForLocations(int locations) throws IOException, InterruptedException {
    for (int tries = 0; tries < RETRIES; ) {
      try {
        LocatedBlock locatedBlock = getLocatedBlock();
        assertThat(locatedBlock.getLocations().length, is(locations));
        break;
      } catch (AssertionError e) {
        if (++tries < RETRIES) {
          Thread.sleep(1000);
        } else {
          throw e;
        }
      }
    }
}
private LocatedBlock getLocatedBlock() throws IOException {
LocatedBlocks locatedBlocks = client.getLocatedBlocks(PATH.toString(), 0, BLOCK_SIZE);
assertThat(locatedBlocks.getLocatedBlocks().size(), is(1));
return Iterables.getOnlyElement(locatedBlocks.getLocatedBlocks());
}
private void validateStorageState(StorageReport[] storageReports, DatanodeStorage.State state) {
for (StorageReport storageReport : storageReports) {
DatanodeStorage storage = storageReport.getStorage();
assertThat(storage.getState(), is(state));
}
}
private void validateNumberReplicas(int expectedReplicas) throws IOException {
NumberReplicas numberReplicas = blockManager.countNodes(storedBlock);
assertThat(numberReplicas.liveReplicas(), is(expectedReplicas));
assertThat(numberReplicas.excessReplicas(), is(0));
assertThat(numberReplicas.corruptReplicas(), is(0));
assertThat(numberReplicas.decommissionedAndDecommissioning(), is(0));
assertThat(numberReplicas.replicasOnStaleNodes(), is(0));
BlockManagerTestUtil.updateState(blockManager);
assertThat(blockManager.getUnderReplicatedBlocksCount(), is(0L));
assertThat(blockManager.getExcessBlocksCount(), is(0L));
}
/**
* Verify that <tt>READ_ONLY_SHARED</tt> replicas are <i>not</i> counted towards the overall
* replication count, but <i>are</i> included as replica locations returned to clients for reads.
*/
@Test
public void testReplicaCounting() throws Exception {
// There should only be 1 *replica* (the READ_ONLY_SHARED doesn't count)
validateNumberReplicas(1);
fs.setReplication(PATH, (short) 2);
// There should now be 3 *locations* for the block, and 2 *replicas*
waitForLocations(3);
validateNumberReplicas(2);
}
/**
* Verify that the NameNode is able to still use <tt>READ_ONLY_SHARED</tt> replicas even
* when the single NORMAL replica is offline (and the effective replication count is 0).
*/
@Test
public void testNormalReplicaOffline() throws Exception {
// Stop the datanode hosting the NORMAL replica
cluster.stopDataNode(normalDataNode.getXferAddr());
// Force NameNode to detect that the datanode is down
BlockManagerTestUtil.noticeDeadDatanode(
cluster.getNameNode(), normalDataNode.getXferAddr());
// The live replica count should now be zero (since the NORMAL replica is offline)
NumberReplicas numberReplicas = blockManager.countNodes(storedBlock);
assertThat(numberReplicas.liveReplicas(), is(0));
// The block should be reported as under-replicated
BlockManagerTestUtil.updateState(blockManager);
assertThat(blockManager.getUnderReplicatedBlocksCount(), is(1L));
// The BlockManager should be able to heal the replication count back to 1
// by triggering an inter-datanode replication from one of the READ_ONLY_SHARED replicas
BlockManagerTestUtil.computeAllPendingWork(blockManager);
DFSTestUtil.waitForReplication(cluster, extendedBlock, 1, 1, 0);
// There should now be 2 *locations* for the block, and 1 *replica*
assertThat(getLocatedBlock().getLocations().length, is(2));
validateNumberReplicas(1);
}
/**
* Verify that corrupt <tt>READ_ONLY_SHARED</tt> replicas aren't counted
* towards the corrupt replicas total.
*/
@Test
public void testReadOnlyReplicaCorrupt() throws Exception {
// "Corrupt" a READ_ONLY_SHARED replica by reporting it as a bad replica
client.reportBadBlocks(new LocatedBlock[] {
new LocatedBlock(extendedBlock, new DatanodeInfo[] { readOnlyDataNode })
});
// There should now be only 1 *location* for the block as the READ_ONLY_SHARED is corrupt
waitForLocations(1);
// However, the corrupt READ_ONLY_SHARED replica should *not* affect the overall corrupt replicas count
NumberReplicas numberReplicas = blockManager.countNodes(storedBlock);
assertThat(numberReplicas.corruptReplicas(), is(0));
}
}
| 10,611 | 37.729927 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyListOf;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.DataNode.BlockRecord;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DataChecksum;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
/**
* This tests if sync all replicas in block recovery works correctly
*/
public class TestBlockRecovery {
private static final Log LOG = LogFactory.getLog(TestBlockRecovery.class);
private static final String DATA_DIR =
MiniDFSCluster.getBaseDirectory() + "data";
private DataNode dn;
private Configuration conf;
private final static long RECOVERY_ID = 3000L;
private final static String CLUSTER_ID = "testClusterID";
private final static String POOL_ID = "BP-TEST";
private final static InetSocketAddress NN_ADDR = new InetSocketAddress(
"localhost", 5020);
private final static long BLOCK_ID = 1000L;
private final static long GEN_STAMP = 2000L;
private final static long BLOCK_LEN = 3000L;
private final static long REPLICA_LEN1 = 6000L;
private final static long REPLICA_LEN2 = 5000L;
private final static ExtendedBlock block = new ExtendedBlock(POOL_ID,
BLOCK_ID, BLOCK_LEN, GEN_STAMP);
static {
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
}
/**
* Starts an instance of DataNode
* @throws IOException
*/
@Before
public void startUp() throws IOException, URISyntaxException {
conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, DATA_DIR);
conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
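    // Port 0 binds each DataNode server socket to an ephemeral port so
    // concurrent tests do not collide.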
conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
FileSystem.setDefaultUri(conf,
"hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
ArrayList<StorageLocation> locations = new ArrayList<StorageLocation>();
File dataDir = new File(DATA_DIR);
FileUtil.fullyDelete(dataDir);
dataDir.mkdirs();
StorageLocation location = StorageLocation.parse(dataDir.getPath());
locations.add(location);
final DatanodeProtocolClientSideTranslatorPB namenode =
mock(DatanodeProtocolClientSideTranslatorPB.class);
Mockito.doAnswer(new Answer<DatanodeRegistration>() {
@Override
public DatanodeRegistration answer(InvocationOnMock invocation)
throws Throwable {
return (DatanodeRegistration) invocation.getArguments()[0];
}
}).when(namenode).registerDatanode(
Mockito.any(DatanodeRegistration.class));
when(namenode.versionRequest()).thenReturn(new NamespaceInfo
(1, CLUSTER_ID, POOL_ID, 1L));
when(namenode.sendHeartbeat(
Mockito.any(DatanodeRegistration.class),
Mockito.any(StorageReport[].class),
Mockito.anyLong(),
Mockito.anyLong(),
Mockito.anyInt(),
Mockito.anyInt(),
Mockito.anyInt(),
Mockito.any(VolumeFailureSummary.class),
Mockito.anyBoolean()))
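        // Respond with no commands, an active NN, and a random odd (hence
        // non-zero) full block report lease id.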
.thenReturn(new HeartbeatResponse(
new DatanodeCommand[0],
new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1),
null, ThreadLocalRandom.current().nextLong() | 1L));
dn = new DataNode(conf, locations, null) {
@Override
DatanodeProtocolClientSideTranslatorPB connectToNN(
InetSocketAddress nnAddr) throws IOException {
Assert.assertEquals(NN_ADDR, nnAddr);
return namenode;
}
};
// Trigger a heartbeat so that it acknowledges the NN as active.
dn.getAllBpOs().get(0).triggerHeartbeatForTests();
}
/**
* Cleans the resources and closes the instance of datanode
* @throws IOException if an error occurred
*/
@After
public void tearDown() throws IOException {
if (dn != null) {
try {
dn.shutdown();
} catch(Exception e) {
LOG.error("Cannot close: ", e);
} finally {
File dir = new File(DATA_DIR);
if (dir.exists())
Assert.assertTrue(
"Cannot delete data-node dirs", FileUtil.fullyDelete(dir));
}
}
}
/** Sync two replicas */
private void testSyncReplicas(ReplicaRecoveryInfo replica1,
ReplicaRecoveryInfo replica2,
InterDatanodeProtocol dn1,
InterDatanodeProtocol dn2,
long expectLen) throws IOException {
DatanodeInfo[] locs = new DatanodeInfo[]{
mock(DatanodeInfo.class), mock(DatanodeInfo.class)};
RecoveringBlock rBlock = new RecoveringBlock(block,
locs, RECOVERY_ID);
ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
BlockRecord record1 = new BlockRecord(
DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
BlockRecord record2 = new BlockRecord(
DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
syncList.add(record1);
syncList.add(record2);
when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
anyLong(), anyLong())).thenReturn("storage1");
when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
anyLong(), anyLong())).thenReturn("storage2");
dn.syncBlock(rBlock, syncList);
}
/**
* BlockRecovery_02.8.
* Two replicas are in Finalized state
* @throws IOException in case of an error
*/
@Test
public void testFinalizedReplicas () throws IOException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED);
ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN1, GEN_STAMP-2, ReplicaState.FINALIZED);
InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
REPLICA_LEN1);
verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
REPLICA_LEN1);
// two finalized replicas have different length
replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED);
replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN2, GEN_STAMP-2, ReplicaState.FINALIZED);
try {
testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
Assert.fail("Two finalized replicas should not have different lengthes!");
} catch (IOException e) {
Assert.assertTrue(e.getMessage().startsWith(
"Inconsistent size of finalized replicas. "));
}
}
/**
* BlockRecovery_02.9.
* One replica is Finalized and another is RBW.
* @throws IOException in case of an error
*/
@Test
public void testFinalizedRbwReplicas() throws IOException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
// rbw and finalized replicas have the same length
ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED);
ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN1, GEN_STAMP-2, ReplicaState.RBW);
InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
REPLICA_LEN1);
verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
REPLICA_LEN1);
// rbw replica has a different length from the finalized one
replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED);
replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RBW);
dn1 = mock(InterDatanodeProtocol.class);
dn2 = mock(InterDatanodeProtocol.class);
testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
verify(dn2, never()).updateReplicaUnderRecovery(
block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
}
/**
* BlockRecovery_02.10.
* One replica is Finalized and another is RWR.
* @throws IOException in case of an error
*/
@Test
public void testFinalizedRwrReplicas() throws IOException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
    // rwr and finalized replicas have the same length
ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED);
ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN1, GEN_STAMP-2, ReplicaState.RWR);
InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
REPLICA_LEN1);
verify(dn2, never()).updateReplicaUnderRecovery(
block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
// rbw replica has a different length from the finalized one
replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED);
replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RBW);
dn1 = mock(InterDatanodeProtocol.class);
dn2 = mock(InterDatanodeProtocol.class);
testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
REPLICA_LEN1);
verify(dn2, never()).updateReplicaUnderRecovery(
block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
}
/**
* BlockRecovery_02.11.
* Two replicas are RBW.
* @throws IOException in case of an error
*/
@Test
public void testRBWReplicas() throws IOException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RBW);
ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RBW);
InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
}
/**
* BlockRecovery_02.12.
* One replica is RBW and another is RWR.
* @throws IOException in case of an error
*/
@Test
public void testRBW_RWRReplicas() throws IOException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RBW);
ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN1, GEN_STAMP-2, ReplicaState.RWR);
InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
REPLICA_LEN1);
verify(dn2, never()).updateReplicaUnderRecovery(
block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
}
/**
* BlockRecovery_02.13.
* Two replicas are RWR.
* @throws IOException in case of an error
*/
@Test
public void testRWRReplicas() throws IOException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RWR);
ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RWR);
InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
}
private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
DatanodeInfo mockOtherDN = DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] locs = new DatanodeInfo[] {
new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
mockOtherDN };
RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID);
blocks.add(rBlock);
return blocks;
}
/**
* BlockRecoveryFI_05. One DN throws RecoveryInProgressException.
*
* @throws IOException
* in case of an error
*/
@Test
public void testRecoveryInProgressException()
throws IOException, InterruptedException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
DataNode spyDN = spy(dn);
doThrow(new RecoveryInProgressException("Replica recovery is in progress")).
when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
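    // recoverBlocks runs asynchronously in a Daemon thread; join it before
    // verifying that syncBlock was never reached.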
Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
d.join();
verify(spyDN, never()).syncBlock(
any(RecoveringBlock.class), anyListOf(BlockRecord.class));
}
/**
* BlockRecoveryFI_06. all datanodes throws an exception.
*
* @throws IOException
* in case of an error
*/
@Test
public void testErrorReplicas() throws IOException, InterruptedException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
DataNode spyDN = spy(dn);
doThrow(new IOException()).
when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
d.join();
verify(spyDN, never()).syncBlock(
any(RecoveringBlock.class), anyListOf(BlockRecord.class));
}
/**
* BlockRecoveryFI_07. max replica length from all DNs is zero.
*
* @throws IOException in case of an error
*/
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
DataNode spyDN = spy(dn);
doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
initReplicaRecovery(any(RecoveringBlock.class));
Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
d.join();
DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
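    // With every replica reporting length zero, the NN should be asked to
    // close and delete the block: new length 0, closeFile and deleteBlock
    // both true, and no recovery targets.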
verify(dnP).commitBlockSynchronization(
block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
private List<BlockRecord> initBlockRecords(DataNode spyDN) throws IOException {
List<BlockRecord> blocks = new ArrayList<BlockRecord>(1);
DatanodeRegistration dnR = dn.getDNRegistrationForBP(block.getBlockPoolId());
BlockRecord blockRecord = new BlockRecord(
new DatanodeID(dnR), spyDN,
new ReplicaRecoveryInfo(block.getBlockId(), block.getNumBytes(),
block.getGenerationStamp(), ReplicaState.FINALIZED));
blocks.add(blockRecord);
return blocks;
}
private final static RecoveringBlock rBlock =
new RecoveringBlock(block, null, RECOVERY_ID);
/**
* BlockRecoveryFI_09. some/all DNs failed to update replicas.
*
* @throws IOException in case of an error
*/
@Test
public void testFailedReplicaUpdate() throws IOException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
DataNode spyDN = spy(dn);
doThrow(new IOException()).when(spyDN).updateReplicaUnderRecovery(
block, RECOVERY_ID, BLOCK_ID, block.getNumBytes());
try {
spyDN.syncBlock(rBlock, initBlockRecords(spyDN));
fail("Sync should fail");
} catch (IOException e) {
      GenericTestUtils.assertExceptionContains("Cannot recover ", e);
}
}
/**
* BlockRecoveryFI_10. DN has no ReplicaUnderRecovery.
*
* @throws IOException in case of an error
*/
@Test
public void testNoReplicaUnderRecovery() throws IOException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
dn.data.createRbw(StorageType.DEFAULT, block, false);
try {
dn.syncBlock(rBlock, initBlockRecords(dn));
fail("Sync should fail");
} catch (IOException e) {
      GenericTestUtils.assertExceptionContains("Cannot recover ", e);
}
DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
verify(namenode, never()).commitBlockSynchronization(
any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
anyBoolean(), any(DatanodeID[].class), any(String[].class));
}
/**
* BlockRecoveryFI_11. a replica's recovery id does not match new GS.
*
* @throws IOException in case of an error
*/
@Test
public void testNotMatchedReplicaID() throws IOException {
if(LOG.isDebugEnabled()) {
LOG.debug("Running " + GenericTestUtils.getMethodName());
}
ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(
StorageType.DEFAULT, block, false).getReplica();
ReplicaOutputStreams streams = null;
try {
streams = replicaInfo.createStreams(true,
DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
streams.getChecksumOut().write('a');
dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
try {
dn.syncBlock(rBlock, initBlockRecords(dn));
fail("Sync should fail");
} catch (IOException e) {
        GenericTestUtils.assertExceptionContains("Cannot recover ", e);
}
DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
verify(namenode, never()).commitBlockSynchronization(
any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
anyBoolean(), any(DatanodeID[].class), any(String[].class));
    } finally {
      if (streams != null) {
        streams.close();
      }
    }
}
  /**
   * Test to verify the race between finalizeBlock and lease recovery.
   *
   * @throws Exception in case of an error
   */
@Test(timeout = 20000)
public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() throws Exception {
    tearDown(); // Stop the mocked DN started in startup()
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, "1000");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1).build();
try {
cluster.waitClusterUp();
DistributedFileSystem fs = cluster.getFileSystem();
Path path = new Path("/test");
FSDataOutputStream out = fs.create(path);
out.writeBytes("data");
out.hsync();
List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs.open(path));
final LocatedBlock block = blocks.get(0);
final DataNode dataNode = cluster.getDataNodes().get(0);
final AtomicBoolean recoveryInitResult = new AtomicBoolean(true);
Thread recoveryThread = new Thread() {
@Override
public void run() {
try {
DatanodeInfo[] locations = block.getLocations();
final RecoveringBlock recoveringBlock = new RecoveringBlock(
block.getBlock(), locations, block.getBlock()
.getGenerationStamp() + 1);
synchronized (dataNode.data) {
Thread.sleep(2000);
dataNode.initReplicaRecovery(recoveringBlock);
}
} catch (Exception e) {
recoveryInitResult.set(false);
}
}
};
recoveryThread.start();
try {
out.close();
} catch (IOException e) {
Assert.assertTrue("Writing should fail",
e.getMessage().contains("are bad. Aborting..."));
} finally {
recoveryThread.join();
}
Assert.assertTrue("Recovery should be initiated successfully",
recoveryInitResult.get());
dataNode.updateReplicaUnderRecovery(block.getBlock(), block.getBlock()
.getGenerationStamp() + 1, block.getBlock().getBlockId(),
block.getBlockSize());
} finally {
if (null != cluster) {
cluster.shutdown();
cluster = null;
}
}
}
}
| 25,334 | 37.738532 | 89 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
public class TestBlockPoolManager {
private final Log LOG = LogFactory.getLog(TestBlockPoolManager.class);
private final DataNode mockDN = Mockito.mock(DataNode.class);
private BlockPoolManager bpm;
private final StringBuilder log = new StringBuilder();
private int mockIdx = 1;
@Before
public void setupBPM() {
bpm = new BlockPoolManager(mockDN){
@Override
protected BPOfferService createBPOS(List<InetSocketAddress> nnAddrs) {
final int idx = mockIdx++;
doLog("create #" + idx);
final BPOfferService bpos = Mockito.mock(BPOfferService.class);
Mockito.doReturn("Mock BPOS #" + idx).when(bpos).toString();
// Log refreshes
try {
Mockito.doAnswer(
new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
doLog("refresh #" + idx);
return null;
}
}).when(bpos).refreshNNList(
Mockito.<ArrayList<InetSocketAddress>>any());
} catch (IOException e) {
throw new RuntimeException(e);
}
// Log stops
Mockito.doAnswer(
new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
doLog("stop #" + idx);
bpm.remove(bpos);
return null;
}
}).when(bpos).stop();
return bpos;
}
};
}
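  // Hedged sketch (assumption, not original code) of the doAnswer(...) idiom
  // used in setupBPM() to trace calls on a mock; on Java 8+ the same stub can
  // be written as a lambda:
  //   Mockito.doAnswer(invocation -> { doLog("stop #" + idx); return null; })
  //       .when(bpos).stop();
  // Every stubbed call appends one line to the shared log, which the tests
  // below compare against the expected create/refresh/stop sequence.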
private void doLog(String string) {
synchronized(log) {
LOG.info(string);
log.append(string).append("\n");
}
}
@Test
public void testSimpleSingleNS() throws Exception {
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY,
"hdfs://mock1:8020");
bpm.refreshNamenodes(conf);
assertEquals("create #1\n", log.toString());
}
@Test
public void testFederationRefresh() throws Exception {
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMESERVICES,
"ns1,ns2");
addNN(conf, "ns1", "mock1:8020");
addNN(conf, "ns2", "mock1:8020");
bpm.refreshNamenodes(conf);
assertEquals(
"create #1\n" +
"create #2\n", log.toString());
log.setLength(0);
// Remove the first NS
conf.set(DFSConfigKeys.DFS_NAMESERVICES,
"ns2");
bpm.refreshNamenodes(conf);
assertEquals(
"stop #1\n" +
"refresh #2\n", log.toString());
log.setLength(0);
    // Add back an NS -- this creates a new BPOS since the old
    // one for ns1 was stopped when the nameservice was removed
conf.set(DFSConfigKeys.DFS_NAMESERVICES,
"ns1,ns2");
bpm.refreshNamenodes(conf);
assertEquals(
"create #3\n" +
"refresh #2\n", log.toString());
}
@Test
public void testInternalNameService() throws Exception {
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1,ns2,ns3");
addNN(conf, "ns1", "mock1:8020");
addNN(conf, "ns2", "mock1:8020");
addNN(conf, "ns3", "mock1:8020");
conf.set(DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY, "ns1");
bpm.refreshNamenodes(conf);
assertEquals("create #1\n", log.toString());
@SuppressWarnings("unchecked")
Map<String, BPOfferService> map = (Map<String, BPOfferService>) Whitebox
.getInternalState(bpm, "bpByNameserviceId");
Assert.assertFalse(map.containsKey("ns2"));
Assert.assertFalse(map.containsKey("ns3"));
Assert.assertTrue(map.containsKey("ns1"));
log.setLength(0);
}
private static void addNN(Configuration conf, String ns, String addr) {
String key = DFSUtil.addKeySuffixes(
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, ns);
conf.set(key, addr);
}
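  // For illustration (derived from addNN above, values hypothetical):
  // addNN(conf, "ns1", "mock1:8020") sets the per-nameservice key
  //   dfs.namenode.rpc-address.ns1 = mock1:8020
  // which is how refreshNamenodes() finds one NameNode address per
  // nameservice listed in dfs.nameservices.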
}
| 5,347 | 32.217391 | 82 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
/**
* Tests {@link DirectoryScanner} handling of differences
 * between blocks on disk and blocks in memory.
*/
public class TestDirectoryScanner {
private static final Log LOG = LogFactory.getLog(TestDirectoryScanner.class);
private static final Configuration CONF = new HdfsConfiguration();
private static final int DEFAULT_GEN_STAMP = 9999;
private MiniDFSCluster cluster;
private String bpid;
private DFSClient client;
private FsDatasetSpi<? extends FsVolumeSpi> fds = null;
private DirectoryScanner scanner = null;
private final Random rand = new Random();
private final Random r = new Random();
private static final int BLOCK_LENGTH = 100;
static {
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_LENGTH);
CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
CONF.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
Long.MAX_VALUE);
}
  /** Create a file with a length of <code>fileLen</code>. */
private List<LocatedBlock> createFile(String fileNamePrefix,
long fileLen,
boolean isLazyPersist) throws IOException {
FileSystem fs = cluster.getFileSystem();
Path filePath = new Path("/" + fileNamePrefix + ".dat");
DFSTestUtil.createFile(
fs, filePath, isLazyPersist, 1024, fileLen,
BLOCK_LENGTH, (short) 1, r.nextLong(), false);
return client.getLocatedBlocks(filePath.toString(), 0, fileLen).getLocatedBlocks();
}
/** Truncate a block file */
private long truncateBlockFile() throws IOException {
synchronized (fds) {
for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
File f = b.getBlockFile();
File mf = b.getMetaFile();
// Truncate a block file that has a corresponding metadata file
if (f.exists() && f.length() != 0 && mf.exists()) {
FileOutputStream s = null;
FileChannel channel = null;
try {
s = new FileOutputStream(f);
channel = s.getChannel();
channel.truncate(0);
LOG.info("Truncated block file " + f.getAbsolutePath());
return b.getBlockId();
} finally {
IOUtils.cleanup(LOG, channel, s);
}
}
}
}
return 0;
}
/** Delete a block file */
private long deleteBlockFile() {
synchronized(fds) {
for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
File f = b.getBlockFile();
File mf = b.getMetaFile();
        // Delete a block file that has a corresponding metadata file
if (f.exists() && mf.exists() && f.delete()) {
LOG.info("Deleting block file " + f.getAbsolutePath());
return b.getBlockId();
}
}
}
return 0;
}
/** Delete block meta file */
private long deleteMetaFile() {
synchronized(fds) {
for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
File file = b.getMetaFile();
// Delete a metadata file
if (file.exists() && file.delete()) {
LOG.info("Deleting metadata file " + file.getAbsolutePath());
return b.getBlockId();
}
}
}
return 0;
}
/**
* Duplicate the given block on all volumes.
* @param blockId
* @throws IOException
*/
private void duplicateBlock(long blockId) throws IOException {
synchronized (fds) {
ReplicaInfo b = FsDatasetTestUtil.fetchReplicaInfo(fds, bpid, blockId);
try (FsDatasetSpi.FsVolumeReferences volumes =
fds.getFsVolumeReferences()) {
for (FsVolumeSpi v : volumes) {
if (v.getStorageID().equals(b.getVolume().getStorageID())) {
continue;
}
// Volume without a copy of the block. Make a copy now.
File sourceBlock = b.getBlockFile();
File sourceMeta = b.getMetaFile();
String sourceRoot = b.getVolume().getBasePath();
String destRoot = v.getBasePath();
String relativeBlockPath =
new File(sourceRoot).toURI().relativize(sourceBlock.toURI())
.getPath();
String relativeMetaPath =
new File(sourceRoot).toURI().relativize(sourceMeta.toURI())
.getPath();
File destBlock = new File(destRoot, relativeBlockPath);
File destMeta = new File(destRoot, relativeMetaPath);
destBlock.getParentFile().mkdirs();
FileUtils.copyFile(sourceBlock, destBlock);
FileUtils.copyFile(sourceMeta, destMeta);
if (destBlock.exists() && destMeta.exists()) {
LOG.info("Copied " + sourceBlock + " ==> " + destBlock);
LOG.info("Copied " + sourceMeta + " ==> " + destMeta);
}
}
}
}
}
/** Get a random blockId that is not used already */
  private long getFreeBlockId() {
    long id = rand.nextLong();
    while (FsDatasetTestUtil.fetchReplicaInfo(fds, bpid, id) != null) {
      id = rand.nextLong();
    }
    return id;
  }
private String getBlockFile(long id) {
return Block.BLOCK_FILE_PREFIX + id;
}
private String getMetaFile(long id) {
return Block.BLOCK_FILE_PREFIX + id + "_" + DEFAULT_GEN_STAMP
+ Block.METADATA_EXTENSION;
}
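  // Example of the on-disk names these helpers produce (illustrative): for a
  // hypothetical block id 123 and the DEFAULT_GEN_STAMP of 9999,
  //   getBlockFile(123) -> "blk_123"
  //   getMetaFile(123)  -> "blk_123_9999.meta"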
  /** Create a block file in a random volume. */
private long createBlockFile() throws IOException {
long id = getFreeBlockId();
try (FsDatasetSpi.FsVolumeReferences volumes = fds.getFsVolumeReferences()) {
int numVolumes = volumes.size();
int index = rand.nextInt(numVolumes - 1);
File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getBlockFile(id));
if (file.createNewFile()) {
LOG.info("Created block file " + file.getName());
}
}
return id;
}
  /** Create a metafile in a random volume. */
private long createMetaFile() throws IOException {
long id = getFreeBlockId();
try (FsDatasetSpi.FsVolumeReferences refs = fds.getFsVolumeReferences()) {
int numVolumes = refs.size();
int index = rand.nextInt(numVolumes - 1);
File finalizedDir = refs.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getMetaFile(id));
if (file.createNewFile()) {
LOG.info("Created metafile " + file.getName());
}
}
return id;
}
  /** Create a block file and a corresponding metafile in a random volume. */
private long createBlockMetaFile() throws IOException {
long id = getFreeBlockId();
try (FsDatasetSpi.FsVolumeReferences refs = fds.getFsVolumeReferences()) {
int numVolumes = refs.size();
int index = rand.nextInt(numVolumes - 1);
File finalizedDir = refs.get(index).getFinalizedDir(bpid);
File file = new File(finalizedDir, getBlockFile(id));
if (file.createNewFile()) {
LOG.info("Created block file " + file.getName());
// Create files with same prefix as block file but extension names
// such that during sorting, these files appear around meta file
// to test how DirectoryScanner handles extraneous files
String name1 = file.getAbsolutePath() + ".l";
String name2 = file.getAbsolutePath() + ".n";
file = new File(name1);
if (file.createNewFile()) {
LOG.info("Created extraneous file " + name1);
}
file = new File(name2);
if (file.createNewFile()) {
LOG.info("Created extraneous file " + name2);
}
file = new File(finalizedDir, getMetaFile(id));
if (file.createNewFile()) {
LOG.info("Created metafile " + file.getName());
}
}
}
return id;
}
private void scan(long totalBlocks, int diffsize, long missingMetaFile, long missingBlockFile,
long missingMemoryBlocks, long mismatchBlocks) throws IOException {
scan(totalBlocks, diffsize, missingMetaFile, missingBlockFile,
missingMemoryBlocks, mismatchBlocks, 0);
}
private void scan(long totalBlocks, int diffsize, long missingMetaFile, long missingBlockFile,
long missingMemoryBlocks, long mismatchBlocks, long duplicateBlocks) throws IOException {
scanner.reconcile();
assertTrue(scanner.diffs.containsKey(bpid));
LinkedList<DirectoryScanner.ScanInfo> diff = scanner.diffs.get(bpid);
assertTrue(scanner.stats.containsKey(bpid));
DirectoryScanner.Stats stats = scanner.stats.get(bpid);
assertEquals(diffsize, diff.size());
assertEquals(totalBlocks, stats.totalBlocks);
assertEquals(missingMetaFile, stats.missingMetaFile);
assertEquals(missingBlockFile, stats.missingBlockFile);
assertEquals(missingMemoryBlocks, stats.missingMemoryBlocks);
assertEquals(mismatchBlocks, stats.mismatchBlocks);
assertEquals(duplicateBlocks, stats.duplicateBlocks);
}
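  // Descriptive note on the positional arguments asserted by scan(...):
  //   scan(totalBlocks, diffsize, missingMetaFile, missingBlockFile,
  //        missingMemoryBlocks, mismatchBlocks[, duplicateBlocks])
  // e.g. in runTest() below, scan(totalBlocks, 1, 1, 0, 0, 1) after deleting a
  // meta file expects one diff entry counted as both a missing meta file and a
  // mismatched block.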
@Test (timeout=300000)
public void testRetainBlockOnPersistentStorage() throws Exception {
LazyPersistTestCase.initCacheManipulator();
cluster = new MiniDFSCluster
.Builder(CONF)
.storageTypes(new StorageType[] { StorageType.RAM_DISK, StorageType.DEFAULT })
.numDataNodes(1)
.build();
try {
cluster.waitActive();
DataNode dataNode = cluster.getDataNodes().get(0);
bpid = cluster.getNamesystem().getBlockPoolId();
fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
client = cluster.getFileSystem().getClient();
scanner = new DirectoryScanner(dataNode, fds, CONF);
scanner.setRetainDiffs(true);
FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));
// Add a file with 1 block
List<LocatedBlock> blocks =
createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH, false);
// Ensure no difference between volumeMap and disk.
scan(1, 0, 0, 0, 0, 0);
// Make a copy of the block on RAM_DISK and ensure that it is
// picked up by the scanner.
duplicateBlock(blocks.get(0).getBlock().getBlockId());
scan(2, 1, 0, 0, 0, 0, 1);
verifyStorageType(blocks.get(0).getBlock().getBlockId(), false);
scan(1, 0, 0, 0, 0, 0);
} finally {
if (scanner != null) {
scanner.shutdown();
scanner = null;
}
cluster.shutdown();
cluster = null;
}
}
@Test (timeout=300000)
public void testDeleteBlockOnTransientStorage() throws Exception {
LazyPersistTestCase.initCacheManipulator();
cluster = new MiniDFSCluster
.Builder(CONF)
.storageTypes(new StorageType[] { StorageType.RAM_DISK, StorageType.DEFAULT })
.numDataNodes(1)
.build();
try {
cluster.waitActive();
bpid = cluster.getNamesystem().getBlockPoolId();
DataNode dataNode = cluster.getDataNodes().get(0);
fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
client = cluster.getFileSystem().getClient();
scanner = new DirectoryScanner(dataNode, fds, CONF);
scanner.setRetainDiffs(true);
FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));
      // Create a file on RAM_DISK
List<LocatedBlock> blocks =
createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH, true);
// Ensure no difference between volumeMap and disk.
scan(1, 0, 0, 0, 0, 0);
// Make a copy of the block on DEFAULT storage and ensure that it is
// picked up by the scanner.
duplicateBlock(blocks.get(0).getBlock().getBlockId());
scan(2, 1, 0, 0, 0, 0, 1);
// Ensure that the copy on RAM_DISK was deleted.
verifyStorageType(blocks.get(0).getBlock().getBlockId(), false);
scan(1, 0, 0, 0, 0, 0);
} finally {
if (scanner != null) {
scanner.shutdown();
scanner = null;
}
cluster.shutdown();
cluster = null;
}
}
@Test (timeout=600000)
public void testDirectoryScanner() throws Exception {
// Run the test with and without parallel scanning
for (int parallelism = 1; parallelism < 3; parallelism++) {
runTest(parallelism);
}
}
public void runTest(int parallelism) throws Exception {
cluster = new MiniDFSCluster.Builder(CONF).build();
try {
cluster.waitActive();
bpid = cluster.getNamesystem().getBlockPoolId();
fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
client = cluster.getFileSystem().getClient();
CONF.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
parallelism);
DataNode dataNode = cluster.getDataNodes().get(0);
scanner = new DirectoryScanner(dataNode, fds, CONF);
scanner.setRetainDiffs(true);
// Add files with 100 blocks
createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH * 100, false);
long totalBlocks = 100;
// Test1: No difference between volumeMap and disk
scan(100, 0, 0, 0, 0, 0);
// Test2: block metafile is missing
long blockId = deleteMetaFile();
scan(totalBlocks, 1, 1, 0, 0, 1);
verifyGenStamp(blockId, HdfsConstants.GRANDFATHER_GENERATION_STAMP);
scan(totalBlocks, 0, 0, 0, 0, 0);
// Test3: block file is missing
blockId = deleteBlockFile();
scan(totalBlocks, 1, 0, 1, 0, 0);
totalBlocks--;
verifyDeletion(blockId);
scan(totalBlocks, 0, 0, 0, 0, 0);
// Test4: A block file exists for which there is no metafile and
// a block in memory
blockId = createBlockFile();
totalBlocks++;
scan(totalBlocks, 1, 1, 0, 1, 0);
verifyAddition(blockId, HdfsConstants.GRANDFATHER_GENERATION_STAMP, 0);
scan(totalBlocks, 0, 0, 0, 0, 0);
// Test5: A metafile exists for which there is no block file and
// a block in memory
blockId = createMetaFile();
scan(totalBlocks+1, 1, 0, 1, 1, 0);
File metafile = new File(getMetaFile(blockId));
      assertFalse(metafile.exists());
scan(totalBlocks, 0, 0, 0, 0, 0);
// Test6: A block file and metafile exists for which there is no block in
// memory
blockId = createBlockMetaFile();
totalBlocks++;
scan(totalBlocks, 1, 0, 0, 1, 0);
verifyAddition(blockId, DEFAULT_GEN_STAMP, 0);
scan(totalBlocks, 0, 0, 0, 0, 0);
// Test7: Delete bunch of metafiles
for (int i = 0; i < 10; i++) {
blockId = deleteMetaFile();
}
scan(totalBlocks, 10, 10, 0, 0, 10);
scan(totalBlocks, 0, 0, 0, 0, 0);
// Test8: Delete bunch of block files
for (int i = 0; i < 10; i++) {
blockId = deleteBlockFile();
}
scan(totalBlocks, 10, 0, 10, 0, 0);
totalBlocks -= 10;
scan(totalBlocks, 0, 0, 0, 0, 0);
// Test9: create a bunch of blocks files
for (int i = 0; i < 10 ; i++) {
blockId = createBlockFile();
}
totalBlocks += 10;
scan(totalBlocks, 10, 10, 0, 10, 0);
scan(totalBlocks, 0, 0, 0, 0, 0);
// Test10: create a bunch of metafiles
for (int i = 0; i < 10 ; i++) {
blockId = createMetaFile();
}
scan(totalBlocks+10, 10, 0, 10, 10, 0);
scan(totalBlocks, 0, 0, 0, 0, 0);
// Test11: create a bunch block files and meta files
for (int i = 0; i < 10 ; i++) {
blockId = createBlockMetaFile();
}
totalBlocks += 10;
scan(totalBlocks, 10, 0, 0, 10, 0);
scan(totalBlocks, 0, 0, 0, 0, 0);
// Test12: truncate block files to test block length mismatch
for (int i = 0; i < 10 ; i++) {
truncateBlockFile();
}
scan(totalBlocks, 10, 0, 0, 0, 10);
scan(totalBlocks, 0, 0, 0, 0, 0);
// Test13: all the conditions combined
createMetaFile();
createBlockFile();
createBlockMetaFile();
deleteMetaFile();
deleteBlockFile();
truncateBlockFile();
scan(totalBlocks+3, 6, 2, 2, 3, 2);
scan(totalBlocks+1, 0, 0, 0, 0, 0);
// Test14: validate clean shutdown of DirectoryScanner
////assertTrue(scanner.getRunStatus()); //assumes "real" FSDataset, not sim
scanner.shutdown();
assertFalse(scanner.getRunStatus());
} finally {
if (scanner != null) {
scanner.shutdown();
scanner = null;
}
cluster.shutdown();
}
}
private void verifyAddition(long blockId, long genStamp, long size) {
final ReplicaInfo replicainfo;
replicainfo = FsDatasetTestUtil.fetchReplicaInfo(fds, bpid, blockId);
assertNotNull(replicainfo);
// Added block has the same file as the one created by the test
File file = new File(getBlockFile(blockId));
assertEquals(file.getName(),
FsDatasetTestUtil.getFile(fds, bpid, blockId).getName());
// Generation stamp is same as that of created file
assertEquals(genStamp, replicainfo.getGenerationStamp());
// File size matches
assertEquals(size, replicainfo.getNumBytes());
}
private void verifyDeletion(long blockId) {
// Ensure block does not exist in memory
assertNull(FsDatasetTestUtil.fetchReplicaInfo(fds, bpid, blockId));
}
private void verifyGenStamp(long blockId, long genStamp) {
final ReplicaInfo memBlock;
memBlock = FsDatasetTestUtil.fetchReplicaInfo(fds, bpid, blockId);
assertNotNull(memBlock);
assertEquals(genStamp, memBlock.getGenerationStamp());
}
private void verifyStorageType(long blockId, boolean expectTransient) {
final ReplicaInfo memBlock;
memBlock = FsDatasetTestUtil.fetchReplicaInfo(fds, bpid, blockId);
assertNotNull(memBlock);
assertThat(memBlock.getVolume().isTransientStorage(), is(expectTransient));
}
private static class TestFsVolumeSpi implements FsVolumeSpi {
@Override
public String[] getBlockPoolList() {
return new String[0];
}
@Override
public FsVolumeReference obtainReference() throws ClosedChannelException {
return null;
}
@Override
public long getAvailable() throws IOException {
return 0;
}
@Override
public String getBasePath() {
return (new File("/base")).getAbsolutePath();
}
@Override
public String getPath(String bpid) throws IOException {
return (new File("/base/current/" + bpid)).getAbsolutePath();
}
@Override
public File getFinalizedDir(String bpid) throws IOException {
return new File("/base/current/" + bpid + "/finalized");
}
@Override
public StorageType getStorageType() {
return StorageType.DEFAULT;
}
@Override
public String getStorageID() {
return "";
}
@Override
public void reserveSpaceForRbw(long bytesToReserve) {
}
@Override
public void releaseReservedSpace(long bytesToRelease) {
}
@Override
public boolean isTransientStorage() {
return false;
}
@Override
public void releaseLockedMemory(long bytesToRelease) {
}
@Override
public BlockIterator newBlockIterator(String bpid, String name) {
throw new UnsupportedOperationException();
}
@Override
public BlockIterator loadBlockIterator(String bpid, String name)
throws IOException {
throw new UnsupportedOperationException();
}
@Override
public FsDatasetSpi getDataset() {
throw new UnsupportedOperationException();
}
}
private final static TestFsVolumeSpi TEST_VOLUME = new TestFsVolumeSpi();
private final static String BPID_1 = "BP-783049782-127.0.0.1-1370971773491";
private final static String BPID_2 = "BP-367845636-127.0.0.1-5895645674231";
void testScanInfoObject(long blockId, File blockFile, File metaFile)
throws Exception {
DirectoryScanner.ScanInfo scanInfo =
new DirectoryScanner.ScanInfo(blockId, blockFile, metaFile, TEST_VOLUME);
assertEquals(blockId, scanInfo.getBlockId());
if (blockFile != null) {
assertEquals(blockFile.getAbsolutePath(),
scanInfo.getBlockFile().getAbsolutePath());
} else {
assertNull(scanInfo.getBlockFile());
}
if (metaFile != null) {
assertEquals(metaFile.getAbsolutePath(),
scanInfo.getMetaFile().getAbsolutePath());
} else {
assertNull(scanInfo.getMetaFile());
}
assertEquals(TEST_VOLUME, scanInfo.getVolume());
}
void testScanInfoObject(long blockId) throws Exception {
DirectoryScanner.ScanInfo scanInfo =
new DirectoryScanner.ScanInfo(blockId, null, null, null);
assertEquals(blockId, scanInfo.getBlockId());
assertNull(scanInfo.getBlockFile());
assertNull(scanInfo.getMetaFile());
}
@Test(timeout=120000)
public void TestScanInfo() throws Exception {
testScanInfoObject(123,
new File(TEST_VOLUME.getFinalizedDir(BPID_1).getAbsolutePath(),
"blk_123"),
new File(TEST_VOLUME.getFinalizedDir(BPID_1).getAbsolutePath(),
"blk_123__1001.meta"));
testScanInfoObject(464,
new File(TEST_VOLUME.getFinalizedDir(BPID_1).getAbsolutePath(),
"blk_123"),
null);
testScanInfoObject(523,
null,
new File(TEST_VOLUME.getFinalizedDir(BPID_1).getAbsolutePath(),
"blk_123__1009.meta"));
testScanInfoObject(789,
null,
null);
testScanInfoObject(456);
testScanInfoObject(123,
new File(TEST_VOLUME.getFinalizedDir(BPID_2).getAbsolutePath(),
"blk_567"),
new File(TEST_VOLUME.getFinalizedDir(BPID_2).getAbsolutePath(),
"blk_567__1004.meta"));
}
}
| 24,225 | 33.460882 | 98 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertThat;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.anyLong;
public class TestStorageReport {
public static final Log LOG = LogFactory.getLog(TestStorageReport.class);
private static final short REPL_FACTOR = 1;
private static final StorageType storageType = StorageType.SSD; // pick non-default.
private static Configuration conf;
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
static String bpid;
@Before
public void startUpCluster() throws IOException {
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPL_FACTOR)
.storageTypes(new StorageType[] { storageType, storageType } )
.build();
fs = cluster.getFileSystem();
bpid = cluster.getNamesystem().getBlockPoolId();
}
@After
public void shutDownCluster() throws IOException {
if (cluster != null) {
fs.close();
cluster.shutdown();
cluster = null;
}
}
/**
* Ensure that storage type and storage state are propagated
* in Storage Reports.
*/
@Test
public void testStorageReportHasStorageTypeAndState() throws IOException {
    // Make sure we are not testing with the default type; that would not
    // be a very good test.
assertNotSame(storageType, StorageType.DEFAULT);
NameNode nn = cluster.getNameNode();
DataNode dn = cluster.getDataNodes().get(0);
// Insert a spy object for the NN RPC.
DatanodeProtocolClientSideTranslatorPB nnSpy =
DataNodeTestUtils.spyOnBposToNN(dn, nn);
// Trigger a heartbeat so there is an interaction with the spy
// object.
DataNodeTestUtils.triggerHeartbeat(dn);
// Verify that the callback passed in the expected parameters.
ArgumentCaptor<StorageReport[]> captor =
ArgumentCaptor.forClass(StorageReport[].class);
Mockito.verify(nnSpy).sendHeartbeat(
any(DatanodeRegistration.class),
captor.capture(),
anyLong(), anyLong(), anyInt(), anyInt(), anyInt(),
Mockito.any(VolumeFailureSummary.class), Mockito.anyBoolean());
StorageReport[] reports = captor.getValue();
for (StorageReport report: reports) {
assertThat(report.getStorage().getStorageType(), is(storageType));
assertThat(report.getStorage().getState(), is(DatanodeStorage.State.NORMAL));
}
}
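  // Minimal sketch of the ArgumentCaptor idiom used above (illustrative only):
  //   ArgumentCaptor<StorageReport[]> captor =
  //       ArgumentCaptor.forClass(StorageReport[].class);
  //   Mockito.verify(nnSpy).sendHeartbeat(..., captor.capture(), ...);
  //   StorageReport[] sent = captor.getValue(); // the reports actually sent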
}
| 4,269 | 34.882353 | 86 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.*;
import java.io.Closeable;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.List;
import com.google.common.collect.Lists;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.util.Time;
import org.junit.Test;
import org.mockito.Mockito;
import javax.management.MBeanServer;
import javax.management.ObjectName;
public class TestDataNodeMetrics {
private static final Log LOG = LogFactory.getLog(TestDataNodeMetrics.class);
@Test
public void testDataNodeMetrics() throws Exception {
Configuration conf = new HdfsConfiguration();
SimulatedFSDataset.setFactory(conf);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
FileSystem fs = cluster.getFileSystem();
final long LONG_FILE_LEN = Integer.MAX_VALUE+1L;
DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
LONG_FILE_LEN, (short)1, 1L);
List<DataNode> datanodes = cluster.getDataNodes();
      assertEquals(1, datanodes.size());
DataNode datanode = datanodes.get(0);
MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
assertCounter("BytesWritten", LONG_FILE_LEN, rb);
assertTrue("Expected non-zero number of incremental block reports",
getLongCounter("IncrementalBlockReportsNumOps", rb) > 0);
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
@Test
public void testSendDataPacketMetrics() throws Exception {
Configuration conf = new HdfsConfiguration();
final int interval = 1;
conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
FileSystem fs = cluster.getFileSystem();
// Create and read a 1 byte file
Path tmpfile = new Path("/tmp.txt");
DFSTestUtil.createFile(fs, tmpfile,
(long)1, (short)1, 1L);
DFSTestUtil.readFile(fs, tmpfile);
List<DataNode> datanodes = cluster.getDataNodes();
      assertEquals(1, datanodes.size());
DataNode datanode = datanodes.get(0);
MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
// Expect 2 packets, 1 for the 1 byte read, 1 for the empty packet
// signaling the end of the block
assertCounter("SendDataPacketTransferNanosNumOps", (long)2, rb);
assertCounter("SendDataPacketBlockedOnNetworkNanosNumOps", (long)2, rb);
// Wait for at least 1 rollover
Thread.sleep((interval + 1) * 1000);
// Check that the sendPacket percentiles rolled to non-zero values
String sec = interval + "s";
assertQuantileGauges("SendDataPacketBlockedOnNetworkNanos" + sec, rb);
assertQuantileGauges("SendDataPacketTransferNanos" + sec, rb);
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
@Test
public void testReceivePacketMetrics() throws Exception {
Configuration conf = new HdfsConfiguration();
final int interval = 1;
conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
Path testFile = new Path("/testFlushNanosMetric.txt");
FSDataOutputStream fout = fs.create(testFile);
fout.write(new byte[1]);
fout.hsync();
fout.close();
List<DataNode> datanodes = cluster.getDataNodes();
DataNode datanode = datanodes.get(0);
MetricsRecordBuilder dnMetrics = getMetrics(datanode.getMetrics().name());
// Expect two flushes, 1 for the flush that occurs after writing,
// 1 that occurs on closing the data and metadata files.
assertCounter("FlushNanosNumOps", 2L, dnMetrics);
// Expect two syncs, one from the hsync, one on close.
assertCounter("FsyncNanosNumOps", 2L, dnMetrics);
// Wait for at least 1 rollover
Thread.sleep((interval + 1) * 1000);
// Check the receivePacket percentiles that should be non-zero
String sec = interval + "s";
assertQuantileGauges("FlushNanos" + sec, dnMetrics);
assertQuantileGauges("FsyncNanos" + sec, dnMetrics);
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
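  // A reading of the metrics naming used above (not a normative spec): with
  // dfs.metrics.percentiles.intervals set to 1, quantile gauges appear under
  // a "1s" suffix once the interval rolls over, e.g. "FlushNanos1s" and
  // "FsyncNanos1s"; that rollover is why the tests sleep (interval + 1)
  // seconds before asserting the gauges.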
/**
* Tests that round-trip acks in a datanode write pipeline are correctly
* measured.
*/
@Test
public void testRoundTripAckMetric() throws Exception {
final int datanodeCount = 2;
final int interval = 1;
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
datanodeCount).build();
try {
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
// Open a file and get the head of the pipeline
Path testFile = new Path("/testRoundTripAckMetric.txt");
FSDataOutputStream fsout = fs.create(testFile, (short) datanodeCount);
DFSOutputStream dout = (DFSOutputStream) fsout.getWrappedStream();
// Slow down the writes to catch the write pipeline
dout.setChunksPerPacket(5);
dout.setArtificialSlowdown(3000);
fsout.write(new byte[10000]);
DatanodeInfo[] pipeline = null;
int count = 0;
while (pipeline == null && count < 5) {
pipeline = dout.getPipeline();
System.out.println("Waiting for pipeline to be created.");
Thread.sleep(1000);
count++;
}
// Get the head node that should be receiving downstream acks
DatanodeInfo headInfo = pipeline[0];
DataNode headNode = null;
for (DataNode datanode : cluster.getDataNodes()) {
if (datanode.getDatanodeId().equals(headInfo)) {
headNode = datanode;
break;
}
}
assertNotNull("Could not find the head of the datanode write pipeline",
headNode);
// Close the file and wait for the metrics to rollover
Thread.sleep((interval + 1) * 1000);
// Check the ack was received
MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics()
.name());
assertTrue("Expected non-zero number of acks",
getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval
+ "s", dnMetrics);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test(timeout=60000)
public void testTimeoutMetric() throws Exception {
final Configuration conf = new HdfsConfiguration();
final Path path = new Path("/test");
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final List<FSDataOutputStream> streams = Lists.newArrayList();
try {
final FSDataOutputStream out =
cluster.getFileSystem().create(path, (short) 2);
final DataNodeFaultInjector injector = Mockito.mock
(DataNodeFaultInjector.class);
Mockito.doThrow(new IOException("mock IOException")).
when(injector).
writeBlockAfterFlush();
DataNodeFaultInjector.instance = injector;
streams.add(out);
out.writeBytes("old gs data\n");
out.hflush();
/* Test the metric. */
final MetricsRecordBuilder dnMetrics =
getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
assertCounter("DatanodeNetworkErrors", 1L, dnMetrics);
/* Test JMX datanode network counts. */
final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
final ObjectName mxbeanName =
new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
final Object dnc =
mbs.getAttribute(mxbeanName, "DatanodeNetworkCounts");
final String allDnc = dnc.toString();
assertTrue("expected to see loopback address",
allDnc.indexOf("127.0.0.1") >= 0);
assertTrue("expected to see networkErrors",
allDnc.indexOf("networkErrors") >= 0);
} finally {
IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
if (cluster != null) {
cluster.shutdown();
}
DataNodeFaultInjector.instance = new DataNodeFaultInjector();
}
}
/**
   * This test ensures that writing causes TotalWriteTime to increment
   * and reading causes TotalReadTime to increase.
   * @throws Exception in case of an error
*/
@Test
public void testDataNodeTimeSpend() throws Exception {
Configuration conf = new HdfsConfiguration();
SimulatedFSDataset.setFactory(conf);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
FileSystem fs = cluster.getFileSystem();
List<DataNode> datanodes = cluster.getDataNodes();
      assertEquals(1, datanodes.size());
DataNode datanode = datanodes.get(0);
MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
final long LONG_FILE_LEN = 1024 * 1024 * 10;
long startWriteValue = getLongCounter("TotalWriteTime", rb);
long startReadValue = getLongCounter("TotalReadTime", rb);
      for (int x = 0; x < 50; x++) {
DFSTestUtil.createFile(fs, new Path("/time.txt."+ x),
LONG_FILE_LEN, (short) 1, Time.monotonicNow());
}
      for (int x = 0; x < 50; x++) {
        DFSTestUtil.readFile(fs, new Path("/time.txt." + x));
      }
MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
long endWriteValue = getLongCounter("TotalWriteTime", rbNew);
long endReadValue = getLongCounter("TotalReadTime", rbNew);
assertTrue(endReadValue > startReadValue);
assertTrue(endWriteValue > startWriteValue);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testDatanodeBlocksReplicatedMetric() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
FileSystem fs = cluster.getFileSystem();
List<DataNode> datanodes = cluster.getDataNodes();
      assertEquals(1, datanodes.size());
DataNode datanode = datanodes.get(0);
MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
long blocksReplicated = getLongCounter("BlocksReplicated", rb);
assertEquals("No blocks replicated yet", 0, blocksReplicated);
Path path = new Path("/counter.txt");
DFSTestUtil.createFile(fs, path, 1024, (short) 2, Time.monotonicNow());
cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR, null);
ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fs, path);
DFSTestUtil.waitForReplication(cluster, firstBlock, 1, 2, 0);
MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
blocksReplicated = getLongCounter("BlocksReplicated", rbNew);
assertEquals("blocks replicated counter incremented", 1, blocksReplicated);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 13,067 | 39.08589 | 81 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverLazyPersistHint.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.*;
import org.apache.hadoop.hdfs.net.*;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.protocol.datatransfer.*;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
import org.apache.hadoop.util.DataChecksum;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.mockito.ArgumentCaptor;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.Is.is;
import static org.mockito.Mockito.*;
/**
* Mock-based unit test to verify that the DataXceiver correctly handles the
* LazyPersist hint from clients.
*/
public class TestDataXceiverLazyPersistHint {
@Rule
public Timeout timeout = new Timeout(300000);
private enum PeerLocality {
LOCAL,
REMOTE
}
private enum NonLocalLazyPersist {
ALLOWED,
NOT_ALLOWED
}
/**
* Ensure that the correct hint is passed to the block receiver when
* the client is local.
*/
@Test
public void testWithLocalClient() throws IOException {
ArgumentCaptor<Boolean> captor = ArgumentCaptor.forClass(Boolean.class);
DataXceiver xceiver = makeStubDataXceiver(
PeerLocality.LOCAL, NonLocalLazyPersist.NOT_ALLOWED, captor);
for (Boolean lazyPersistSetting : Arrays.asList(true, false)) {
issueWriteBlockCall(xceiver, lazyPersistSetting);
assertThat(captor.getValue(), is(lazyPersistSetting));
}
}
/**
* Ensure that hint is always false when the client is remote.
*/
@Test
public void testWithRemoteClient() throws IOException {
ArgumentCaptor<Boolean> captor = ArgumentCaptor.forClass(Boolean.class);
DataXceiver xceiver = makeStubDataXceiver(
PeerLocality.REMOTE, NonLocalLazyPersist.NOT_ALLOWED, captor);
for (Boolean lazyPersistSetting : Arrays.asList(true, false)) {
issueWriteBlockCall(xceiver, lazyPersistSetting);
assertThat(captor.getValue(), is(false));
}
}
/**
* Ensure that the correct hint is passed to the block receiver when
* the client is remote AND dfs.datanode.allow.non.local.lazy.persist
* is set to true.
*/
@Test
public void testOverrideWithRemoteClient() throws IOException {
ArgumentCaptor<Boolean> captor = ArgumentCaptor.forClass(Boolean.class);
DataXceiver xceiver = makeStubDataXceiver(
PeerLocality.REMOTE, NonLocalLazyPersist.ALLOWED, captor);
for (Boolean lazyPersistSetting : Arrays.asList(true, false)) {
issueWriteBlockCall(xceiver, lazyPersistSetting);
assertThat(captor.getValue(), is(lazyPersistSetting));
}
}
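  /*
   * Summary of the hint propagation exercised by the three tests above
   * (descriptive table derived from their assertions):
   *
   *   client locality | non-local lazyPersist | hint seen by BlockReceiver
   *   ----------------+-----------------------+---------------------------
   *   LOCAL           | (any)                 | client's setting
   *   REMOTE          | NOT_ALLOWED           | false
   *   REMOTE          | ALLOWED               | client's setting
   */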
/**
* Issue a write block call with dummy parameters. The only parameter useful
* for this test is the value of lazyPersist.
*/
private void issueWriteBlockCall(DataXceiver xceiver, boolean lazyPersist)
throws IOException {
xceiver.writeBlock(
new ExtendedBlock("Dummy-pool", 0L),
StorageType.RAM_DISK,
null,
"Dummy-Client",
new DatanodeInfo[0],
new StorageType[0],
mock(DatanodeInfo.class),
BlockConstructionStage.PIPELINE_SETUP_CREATE,
0, 0, 0, 0,
DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 0),
CachingStrategy.newDefaultStrategy(),
lazyPersist,
false, null);
}
// Helper functions to setup the mock objects.
private static DataXceiver makeStubDataXceiver(
PeerLocality locality,
NonLocalLazyPersist nonLocalLazyPersist,
final ArgumentCaptor<Boolean> captor) throws IOException {
DataXceiver xceiverSpy = spy(DataXceiver.create(
getMockPeer(locality),
getMockDn(nonLocalLazyPersist),
mock(DataXceiverServer.class)));
doReturn(mock(BlockReceiver.class)).when(xceiverSpy).getBlockReceiver(
any(ExtendedBlock.class), any(StorageType.class),
any(DataInputStream.class), anyString(), anyString(),
any(BlockConstructionStage.class), anyLong(), anyLong(), anyLong(),
anyString(), any(DatanodeInfo.class), any(DataNode.class),
any(DataChecksum.class), any(CachingStrategy.class),
captor.capture(), anyBoolean());
doReturn(mock(DataOutputStream.class)).when(xceiverSpy)
.getBufferedOutputStream();
return xceiverSpy;
}
private static Peer getMockPeer(PeerLocality locality) {
Peer peer = mock(Peer.class);
when(peer.isLocal()).thenReturn(locality == PeerLocality.LOCAL);
when(peer.getRemoteAddressString()).thenReturn("1.1.1.1:1000");
when(peer.getLocalAddressString()).thenReturn("2.2.2.2:2000");
return peer;
}
private static DataNode getMockDn(NonLocalLazyPersist nonLocalLazyPersist) {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(
DFS_DATANODE_NON_LOCAL_LAZY_PERSIST,
nonLocalLazyPersist == NonLocalLazyPersist.ALLOWED);
DNConf dnConf = new DNConf(conf);
DataNodeMetrics mockMetrics = mock(DataNodeMetrics.class);
DataNode mockDn = mock(DataNode.class);
when(mockDn.getDnConf()).thenReturn(dnConf);
when(mockDn.getConf()).thenReturn(conf);
when(mockDn.getMetrics()).thenReturn(mockMetrics);
return mockDn;
}
}
| 6,306 | 34.234637 | 78 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.BlockReaderFactory;
import org.apache.hadoop.hdfs.ClientContext;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.RemotePeerFactory;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.net.TcpPeerServer;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Fine-grain testing of block files and locations after volume failure.
*/
public class TestDataNodeVolumeFailure {
final private int block_size = 512;
MiniDFSCluster cluster = null;
private Configuration conf;
final int dn_num = 2;
final int blocks_num = 30;
final short repl=2;
File dataDir = null;
File data_fail = null;
File failedDir = null;
private FileSystem fs;
// mapping blocks to Meta files(physical files) and locs(NameNode locations)
private class BlockLocs {
public int num_files = 0;
public int num_locs = 0;
}
// block id to BlockLocs
final Map<String, BlockLocs> block_map = new HashMap<String, BlockLocs> ();
@Before
public void setUp() throws Exception {
// bring up a cluster of 2
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, block_size);
// Allow a single volume failure (there are two volumes)
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dn_num).build();
cluster.waitActive();
fs = cluster.getFileSystem();
dataDir = new File(cluster.getDataDirectory());
}
@After
public void tearDown() throws Exception {
if(data_fail != null) {
FileUtil.setWritable(data_fail, true);
}
if(failedDir != null) {
FileUtil.setWritable(failedDir, true);
}
if(cluster != null) {
cluster.shutdown();
}
}
/*
* Verify the number of blocks and files are correct after volume failure,
* and that we can replicate to both datanodes even after a single volume
* failure if the configuration parameter allows this.
*/
@Test
public void testVolumeFailure() throws Exception {
System.out.println("Data dir: is " + dataDir.getPath());
// Data dir structure is dataDir/data[1-4]/[current,tmp...]
// data1,2 belong to datanode 1; data3,4 to datanode 2
String filename = "/test.txt";
Path filePath = new Path(filename);
// we use only a small number of blocks to avoid creating subdirs in the data dir
int filesize = block_size*blocks_num;
DFSTestUtil.createFile(fs, filePath, filesize, repl, 1L);
DFSTestUtil.waitReplication(fs, filePath, repl);
System.out.println("file " + filename + "(size " +
filesize + ") is created and replicated");
// fail the volume
// delete/make non-writable one of the directories (failed volume)
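// data3 is the first volume of the second datanode (datanode i, zero-based,
// owns data(2*i+1) and data(2*i+2)); triggerFailure() below reads replicas
// through that datanode, so this is the volume whose blocks we destroy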
data_fail = new File(dataDir, "data3");
failedDir = MiniDFSCluster.getFinalizedDir(data_fail,
cluster.getNamesystem().getBlockPoolId());
if (failedDir.exists() &&
//!FileUtil.fullyDelete(failedDir)
!deleteBlocks(failedDir)
) {
throw new IOException("Could not delete hdfs directory '" + failedDir + "'");
}
data_fail.setReadOnly();
failedDir.setReadOnly();
System.out.println("Deleteing " + failedDir.getPath() + "; exist=" + failedDir.exists());
// access all the blocks on the "failed" DataNode,
// we need to make sure that the "failed" volume is being accessed -
// and that will cause failure, blocks removal, "emergency" block report
triggerFailure(filename, filesize);
// make sure a block report is sent
DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3
String bpid = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);
Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
dn.getFSDataset().getBlockReports(bpid);
// Send block report
StorageBlockReport[] reports =
new StorageBlockReport[perVolumeBlockLists.size()];
int reportIndex = 0;
for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
DatanodeStorage dnStorage = kvPair.getKey();
BlockListAsLongs blockList = kvPair.getValue();
reports[reportIndex++] =
new StorageBlockReport(dnStorage, blockList);
}
cluster.getNameNodeRpc().blockReport(dnR, bpid, reports, null);
// verify number of blocks and files...
verify(filename, filesize);
// create another file (with one volume failed).
System.out.println("creating file test1.txt");
Path fileName1 = new Path("/test1.txt");
DFSTestUtil.createFile(fs, fileName1, filesize, repl, 1L);
// should be able to replicate to both nodes (2 DN, repl=2)
DFSTestUtil.waitReplication(fs, fileName1, repl);
System.out.println("file " + fileName1.getName() +
" is created and replicated");
}
/**
* Test that DataStorage and BlockPoolSliceStorage remove the failed volume
* after failure.
*/
@Test(timeout=150000)
public void testFailedVolumeBeingRemovedFromDataNode()
throws InterruptedException, IOException, TimeoutException {
// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
// volume failures which is currently not supported on Windows.
assumeTrue(!Path.WINDOWS);
Path file1 = new Path("/test1");
DFSTestUtil.createFile(fs, file1, 1024, (short) 2, 1L);
DFSTestUtil.waitReplication(fs, file1, (short) 2);
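// MiniDFSCluster volume layout: datanode i (zero-based) owns data(2*i+1)
// and data(2*i+2), so dn0's first volume is data1.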
File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
DataNode dn0 = cluster.getDataNodes().get(0);
checkDiskErrorSync(dn0);
// Verify dn0Vol1 has been completely removed from DN0.
// 1. dn0Vol1 is removed from DataStorage.
DataStorage storage = dn0.getStorage();
assertEquals(1, storage.getNumStorageDirs());
for (int i = 0; i < storage.getNumStorageDirs(); i++) {
Storage.StorageDirectory sd = storage.getStorageDir(i);
assertFalse(sd.getRoot().getAbsolutePath().startsWith(
dn0Vol1.getAbsolutePath()
));
}
final String bpid = cluster.getNamesystem().getBlockPoolId();
BlockPoolSliceStorage bpsStorage = storage.getBPStorage(bpid);
assertEquals(1, bpsStorage.getNumStorageDirs());
for (int i = 0; i < bpsStorage.getNumStorageDirs(); i++) {
Storage.StorageDirectory sd = bpsStorage.getStorageDir(i);
assertFalse(sd.getRoot().getAbsolutePath().startsWith(
dn0Vol1.getAbsolutePath()
));
}
// 2. dn0Vol1 is removed from FsDataset
FsDatasetSpi<? extends FsVolumeSpi> data = dn0.getFSDataset();
try (FsDatasetSpi.FsVolumeReferences vols = data.getFsVolumeReferences()) {
for (FsVolumeSpi volume : vols) {
assertNotEquals(new File(volume.getBasePath()).getAbsoluteFile(),
dn0Vol1.getAbsoluteFile());
}
}
// 3. all blocks on dn0Vol1 have been removed.
for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(data, bpid)) {
assertNotNull(replica.getVolume());
assertNotEquals(
new File(replica.getVolume().getBasePath()).getAbsoluteFile(),
dn0Vol1.getAbsoluteFile());
}
// 4. dn0Vol1 is not in DN0's configuration and dataDirs anymore.
String[] dataDirStrs =
dn0.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
assertEquals(1, dataDirStrs.length);
assertFalse(dataDirStrs[0].contains(dn0Vol1.getAbsolutePath()));
}
private static void checkDiskErrorSync(DataNode dn)
throws InterruptedException {
final long lastDiskErrorCheck = dn.getLastDiskErrorCheck();
dn.checkDiskErrorAsync();
// Wait 10 seconds for checkDiskError thread to finish and discover volume
// failures.
int count = 100;
while (count > 0 && dn.getLastDiskErrorCheck() == lastDiskErrorCheck) {
Thread.sleep(100);
count--;
}
assertTrue("Disk checking thread does not finish in 10 seconds",
count > 0);
}
/**
* Test DataNode stops when the number of failed volumes exceeds
* dfs.datanode.failed.volumes.tolerated.
*/
@Test(timeout=10000)
public void testDataNodeShutdownAfterNumFailedVolumeExceedsTolerated()
throws InterruptedException, IOException {
// make both data directories to fail on dn0
final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
DataNodeTestUtils.injectDataDirFailure(dn0Vol1, dn0Vol2);
DataNode dn0 = cluster.getDataNodes().get(0);
checkDiskErrorSync(dn0);
// DN0 should stop after the number of failed disks exceeds the tolerated
// value (1).
assertFalse(dn0.shouldRun());
}
/**
* Test that the DN does not shut down as long as failed volumes are hot swapped out.
*/
@Test
public void testVolumeFailureRecoveredByHotSwappingVolume()
throws InterruptedException, ReconfigurationException, IOException {
final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
final DataNode dn0 = cluster.getDataNodes().get(0);
final String oldDataDirs = dn0.getConf().get(
DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
// Fail dn0Vol1 first.
DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
checkDiskErrorSync(dn0);
// Hot swap out the failure volume.
String dataDirs = dn0Vol2.getPath();
dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
dataDirs);
// Fix failure volume dn0Vol1 and remount it back.
DataNodeTestUtils.restoreDataDirFromFailure(dn0Vol1);
dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
oldDataDirs);
// Fail dn0Vol2. Now since dn0Vol1 has been fixed, DN0 has sufficient
// resources, thus it should keep running.
DataNodeTestUtils.injectDataDirFailure(dn0Vol2);
checkDiskErrorSync(dn0);
assertTrue(dn0.shouldRun());
}
/**
* Test that changing the number of volumes does not impact the disk failure
* tolerance.
*/
@Test
public void testTolerateVolumeFailuresAfterAddingMoreVolumes()
throws InterruptedException, ReconfigurationException, IOException {
final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
final File dn0VolNew = new File(dataDir, "data_new");
final DataNode dn0 = cluster.getDataNodes().get(0);
final String oldDataDirs = dn0.getConf().get(
DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
// Add a new volume to DN0
dn0.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
oldDataDirs + "," + dn0VolNew.getAbsolutePath());
// Fail dn0Vol1; DN0 should keep running since only one of its volumes has failed.
DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
checkDiskErrorSync(dn0);
assertTrue(dn0.shouldRun());
// Fail dn0Vol2, now dn0 should stop, because we only tolerate 1 disk failure.
DataNodeTestUtils.injectDataDirFailure(dn0Vol2);
checkDiskErrorSync(dn0);
assertFalse(dn0.shouldRun());
}
/**
* Test that there are under-replicated blocks after volume failures.
*/
@Test
public void testUnderReplicationAfterVolFailure() throws Exception {
// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
// volume failures which is currently not supported on Windows.
assumeTrue(!Path.WINDOWS);
// Bring up one more datanode
cluster.startDataNodes(conf, 1, true, null, null);
cluster.waitActive();
final BlockManager bm = cluster.getNamesystem().getBlockManager();
Path file1 = new Path("/test1");
DFSTestUtil.createFile(fs, file1, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file1, (short)3);
// Fail the first volume on both datanodes
File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);
Path file2 = new Path("/test2");
DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file2, (short)3);
// underReplicatedBlocks are due to failed volumes
int underReplicatedBlocks =
BlockManagerTestUtil.checkHeartbeatAndGetUnderReplicatedBlocksCount(
cluster.getNamesystem(), bm);
assertTrue("There is no under replicated block after volume failure",
underReplicatedBlocks > 0);
}
/**
* Verifies two things:
*  1. the number of locations for each block reported by the NameNode
*     matches the number of actual block files
*  2. block files plus pending replications equal the total number of block
*     replicas the file should have (the file has 30 blocks, repl=2, so 60 total)
* @param fn - file name
* @param fs - file size
* @throws IOException
*/
private void verify(String fn, int fs) throws IOException{
// now count how many physical blocks are there
int totalReal = countRealBlocks(block_map);
System.out.println("countRealBlocks counted " + totalReal + " blocks");
// count how many blocks store in NN structures.
int totalNN = countNNBlocks(block_map, fn, fs);
System.out.println("countNNBlocks counted " + totalNN + " blocks");
for(String bid : block_map.keySet()) {
BlockLocs bl = block_map.get(bid);
// System.out.println(bid + "->" + bl.num_files + "vs." + bl.num_locs);
// the number of physical files (1 or 2) should match the number of datanodes
// in the list of the block locations
assertEquals("Num files should match num locations",
bl.num_files, bl.num_locs);
}
assertEquals("Num physical blocks should match num stored in the NN",
totalReal, totalNN);
// now check the number of under-replicated blocks
FSNamesystem fsn = cluster.getNamesystem();
// force update of all the metric counts by calling computeDatanodeWork
BlockManagerTestUtil.getComputedDatanodeWork(fsn.getBlockManager());
// get all the counts
long underRepl = fsn.getUnderReplicatedBlocks();
long pendRepl = fsn.getPendingReplicationBlocks();
long totalRepl = underRepl + pendRepl;
System.out.println("underreplicated after = "+ underRepl +
" and pending repl =" + pendRepl + "; total underRepl = " + totalRepl);
System.out.println("total blocks (real and replicating):" +
(totalReal + totalRepl) + " vs. all files blocks " + blocks_num*2);
// together all the blocks should be equal to all real + all underreplicated
assertEquals("Incorrect total block count",
totalReal + totalRepl, blocks_num * repl);
}
/**
* Access each block of the file via the second DataNode until one access fails...
* @param path
* @param size
* @throws IOException
*/
private void triggerFailure(String path, long size) throws IOException {
NamenodeProtocols nn = cluster.getNameNodeRpc();
List<LocatedBlock> locatedBlocks =
nn.getBlockLocations(path, 0, size).getLocatedBlocks();
for (LocatedBlock lb : locatedBlocks) {
DatanodeInfo dinfo = lb.getLocations()[1];
ExtendedBlock b = lb.getBlock();
try {
accessBlock(dinfo, lb);
} catch (IOException e) {
System.out.println("Failure triggered, on block: " + b.getBlockId() +
"; corresponding volume should be removed by now");
break;
}
}
}
/**
* Simulate a volume failure by deleting all the block files in the directory.
* @param dir the directory containing the block files
* @return true if all block files were deleted
*/
private boolean deleteBlocks(File dir) {
File [] fileList = dir.listFiles();
for(File f : fileList) {
if(f.getName().startsWith(Block.BLOCK_FILE_PREFIX)) {
if(!f.delete())
return false;
}
}
return true;
}
/**
* Try to access a block on a datanode. Throws an exception on failure.
* @param datanode
* @param lblock
* @throws IOException
*/
private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock)
throws IOException {
ExtendedBlock block = lblock.getBlock();
InetSocketAddress targetAddr =
NetUtils.createSocketAddr(datanode.getXferAddr());
BlockReader blockReader = new BlockReaderFactory(new DfsClientConf(conf)).
setInetSocketAddress(targetAddr).
setBlock(block).
setFileName(BlockReaderFactory.getFileName(targetAddr,
"test-blockpoolid", block.getBlockId())).
setBlockToken(lblock.getBlockToken()).
setStartOffset(0).
setLength(-1).
setVerifyChecksum(true).
setClientName("TestDataNodeVolumeFailure").
setDatanodeInfo(datanode).
setCachingStrategy(CachingStrategy.newDefaultStrategy()).
setClientCacheContext(ClientContext.getFromConf(conf)).
setConfiguration(conf).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr,
Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
throws IOException {
Peer peer = null;
Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
try {
sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
peer = TcpPeerServer.peerFromSocket(sock);
} finally {
if (peer == null) {
IOUtils.closeSocket(sock);
}
}
return peer;
}
}).
build();
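// Opening the reader is enough to make the datanode touch the replica on
// disk; we close immediately without reading any data.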
blockReader.close();
}
/**
* Count the datanode locations the NameNode reports for each block of a file,
* accumulating per-block location counts in the map.
* @param map block id to BlockLocs
* @param path file path
* @param size file size
* @return total number of replica locations across all blocks
* @throws IOException
*/
private int countNNBlocks(Map<String, BlockLocs> map, String path, long size)
throws IOException {
int total = 0;
NamenodeProtocols nn = cluster.getNameNodeRpc();
List<LocatedBlock> locatedBlocks =
nn.getBlockLocations(path, 0, size).getLocatedBlocks();
//System.out.println("Number of blocks: " + locatedBlocks.size());
for(LocatedBlock lb : locatedBlocks) {
String blockId = ""+lb.getBlock().getBlockId();
//System.out.print(blockId + ": ");
DatanodeInfo[] dn_locs = lb.getLocations();
BlockLocs bl = map.get(blockId);
if(bl == null) {
bl = new BlockLocs();
}
//System.out.print(dn_info.name+",");
total += dn_locs.length;
bl.num_locs += dn_locs.length;
map.put(blockId, bl);
//System.out.println();
}
return total;
}
/**
* Look for real (physical) blocks by counting *.meta files in all the
* storage dirs, accumulating per-block file counts in the map.
* @param map block id to BlockLocs
* @return total number of block metadata files found
*/
private int countRealBlocks(Map<String, BlockLocs> map) {
int total = 0;
final String bpid = cluster.getNamesystem().getBlockPoolId();
for(int i=0; i<dn_num; i++) {
for(int j=0; j<=1; j++) {
File storageDir = cluster.getInstanceStorageDir(i, j);
File dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
if(dir == null) {
System.out.println("dir is null for dn=" + i + " and data_dir=" + j);
continue;
}
List<File> res = MiniDFSCluster.getAllBlockMetadataFiles(dir);
if(res == null) {
System.out.println("res is null for dir = " + dir + " i=" + i + " and j=" + j);
continue;
}
//System.out.println("for dn" + i + "." + j + ": " + dir + "=" + res.length+ " files");
//int ii = 0;
for(File f: res) {
String s = f.getName();
// meta file names look like blk_<blockId>_<genStamp>.meta; take the id between the first and last '_'
assertNotNull("Block file name should not be null", s);
String bid = s.substring(s.indexOf("_")+1, s.lastIndexOf("_"));
//System.out.println(ii++ + ". block " + s + "; id=" + bid);
BlockLocs val = map.get(bid);
if(val == null) {
val = new BlockLocs();
}
val.num_files ++; // one more file for the block
map.put(bid, val);
}
//System.out.println("dir1="+dir.getPath() + "blocks=" + res.length);
//System.out.println("dir2="+dir2.getPath() + "blocks=" + res2.length);
total += res.size();
}
}
return total;
}
}
| 23,582 | 36.7328 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.concurrent.ThreadLocalRandom;
import com.google.common.base.Supplier;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
/**
* Tests the DatanodeProtocol retry policy.
*/
public class TestDatanodeProtocolRetryPolicy {
private static final Log LOG = LogFactory.getLog(
TestDatanodeProtocolRetryPolicy.class);
private static final String DATA_DIR =
MiniDFSCluster.getBaseDirectory() + "data";
private DataNode dn;
private Configuration conf;
private boolean tearDownDone;
ArrayList<StorageLocation> locations = new ArrayList<StorageLocation>();
private final static String CLUSTER_ID = "testClusterID";
private final static String POOL_ID = "BP-TEST";
private final static InetSocketAddress NN_ADDR = new InetSocketAddress(
"localhost", 5020);
private static DatanodeRegistration datanodeRegistration =
DFSTestUtil.getLocalDatanodeRegistration();
static {
((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
}
/**
* Sets up configuration and a storage location for the DataNode under test.
* @throws IOException
*/
@Before
public void startUp() throws IOException, URISyntaxException {
tearDownDone = false;
conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, DATA_DIR);
conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
FileSystem.setDefaultUri(conf,
"hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
File dataDir = new File(DATA_DIR);
FileUtil.fullyDelete(dataDir);
dataDir.mkdirs();
StorageLocation location = StorageLocation.parse(dataDir.getPath());
locations.add(location);
}
/**
* Cleans up resources and shuts down the DataNode instance.
* @throws IOException if an error occurred
*/
@After
public void tearDown() throws IOException {
if (!tearDownDone && dn != null) {
try {
dn.shutdown();
} catch(Exception e) {
LOG.error("Cannot close: ", e);
} finally {
File dir = new File(DATA_DIR);
if (dir.exists())
Assert.assertTrue(
"Cannot delete data-node dirs", FileUtil.fullyDelete(dir));
}
tearDownDone = true;
}
}
private void waitForBlockReport(
final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
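// Poll (every 500 ms, up to 100 seconds) until Mockito observes a
// blockReport call carrying the expected registration and block pool id.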
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
Mockito.verify(mockNN).blockReport(
Mockito.eq(datanodeRegistration),
Mockito.eq(POOL_ID),
Mockito.<StorageBlockReport[]>anyObject(),
Mockito.<BlockReportContext>anyObject());
return true;
} catch (Throwable t) {
LOG.info("waiting on block report: " + t.getMessage());
return false;
}
}
}, 500, 100000);
}
/**
* Verify the following scenario.
* 1. The initial DatanodeProtocol.registerDatanode succeeds.
* 2. DN starts heartbeat process.
* 3. In the first heartbeat, NN asks DN to reregister.
* 4. DN calls DatanodeProtocol.registerDatanode.
* 5. DatanodeProtocol.registerDatanode throws EOFException.
* 6. DN retries.
* 7. DatanodeProtocol.registerDatanode succeeds.
*/
@Test(timeout = 60000)
public void testDatanodeRegistrationRetry() throws Exception {
final DatanodeProtocolClientSideTranslatorPB namenode =
mock(DatanodeProtocolClientSideTranslatorPB.class);
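// Mock registerDatanode: the 1st call succeeds, calls 2 through 4 throw
// EOFException to exercise the retry policy, and the 5th call succeeds.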
Mockito.doAnswer(new Answer<DatanodeRegistration>() {
int i = 0;
@Override
public DatanodeRegistration answer(InvocationOnMock invocation)
throws Throwable {
i++;
if ( i > 1 && i < 5) {
LOG.info("mockito exception " + i);
throw new EOFException("TestDatanodeProtocolRetryPolicy");
} else {
DatanodeRegistration dr =
(DatanodeRegistration) invocation.getArguments()[0];
datanodeRegistration =
new DatanodeRegistration(dr.getDatanodeUuid(), dr);
LOG.info("mockito succeeded " + datanodeRegistration);
return datanodeRegistration;
}
}
}).when(namenode).registerDatanode(
Mockito.any(DatanodeRegistration.class));
when(namenode.versionRequest()).thenReturn(
new NamespaceInfo(1, CLUSTER_ID, POOL_ID, 1L));
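// Mock sendHeartbeat: the first response carries a RegisterCommand so the
// DN re-registers (exercising the flaky registerDatanode mock above);
// later responses carry no commands.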
Mockito.doAnswer(new Answer<HeartbeatResponse>() {
int i = 0;
@Override
public HeartbeatResponse answer(InvocationOnMock invocation)
throws Throwable {
i++;
HeartbeatResponse heartbeatResponse;
if ( i == 1 ) {
LOG.info("mockito heartbeatResponse registration " + i);
heartbeatResponse = new HeartbeatResponse(
new DatanodeCommand[]{RegisterCommand.REGISTER},
new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1),
null, ThreadLocalRandom.current().nextLong() | 1L);
} else {
LOG.info("mockito heartbeatResponse " + i);
heartbeatResponse = new HeartbeatResponse(
new DatanodeCommand[0],
new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1),
null, ThreadLocalRandom.current().nextLong() | 1L);
}
return heartbeatResponse;
}
}).when(namenode).sendHeartbeat(
Mockito.any(DatanodeRegistration.class),
Mockito.any(StorageReport[].class),
Mockito.anyLong(),
Mockito.anyLong(),
Mockito.anyInt(),
Mockito.anyInt(),
Mockito.anyInt(),
Mockito.any(VolumeFailureSummary.class),
Mockito.anyBoolean());
dn = new DataNode(conf, locations, null) {
@Override
DatanodeProtocolClientSideTranslatorPB connectToNN(
InetSocketAddress nnAddr) throws IOException {
Assert.assertEquals(NN_ADDR, nnAddr);
return namenode;
}
};
// Trigger a heartbeat so that it acknowledges the NN as active.
dn.getAllBpOs().get(0).triggerHeartbeatForTests();
waitForBlockReport(namenode);
}
}
| 8,919 | 36.322176 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
import org.apache.hadoop.io.IOUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Test reporting of DN volume failure counts and metrics.
*/
public class TestDataNodeVolumeFailureReporting {
private static final Log LOG = LogFactory.getLog(TestDataNodeVolumeFailureReporting.class);
{
((Log4JLogger)TestDataNodeVolumeFailureReporting.LOG).getLogger().setLevel(Level.ALL);
}
private FileSystem fs;
private MiniDFSCluster cluster;
private Configuration conf;
private String dataDir;
private long volumeCapacity;
// Sleep at least 3 seconds (a 1s heartbeat plus padding) to allow
// for heartbeats to propagate from the datanodes to the namenode.
final int WAIT_FOR_HEARTBEATS = 3000;
// Wait at least (2 * re-check + 10 * heartbeat) seconds for
// a datanode to be considered dead by the namenode.
final int WAIT_FOR_DEATH = 15000;
@Before
public void setUp() throws Exception {
// These tests use DataNodeTestUtils#injectDataDirFailure() to simulate
// volume failures which is currently not supported on Windows.
assumeTrue(!Path.WINDOWS);
// Allow a single volume failure (there are two volumes)
initCluster(1, 2, 1);
}
@After
public void tearDown() throws Exception {
IOUtils.cleanup(LOG, fs);
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Test that individual volume failures do not cause DNs to fail, that
* all volumes failed on a single datanode do cause it to fail, and
* that capacities and liveness are adjusted correctly in the NN.
*/
@Test
public void testSuccessiveVolumeFailures() throws Exception {
// Bring up two more datanodes
cluster.startDataNodes(conf, 2, true, null, null);
cluster.waitActive();
/*
* Calculate the total capacity of all the datanodes. Sleep for
* three seconds to be sure the datanodes have had a chance to
* heartbeat their capacities.
*/
Thread.sleep(WAIT_FOR_HEARTBEATS);
final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
).getDatanodeManager();
final long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
File dn3Vol1 = new File(dataDir, "data"+(2*2+1));
File dn3Vol2 = new File(dataDir, "data"+(2*2+2));
/*
* Make the 1st volume directories on the first two datanodes
* non-accessible. We don't make all three 1st volume directories
* readonly since that would cause the entire pipeline to
* fail. The client does not retry failed nodes even though
* perhaps they could succeed because just a single volume failed.
*/
DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);
/*
* Create file1 and wait for 3 replicas (ie all DNs can still
* store a block). Then assert that all DNs are up, despite the
* volume failures.
*/
Path file1 = new Path("/test1");
DFSTestUtil.createFile(fs, file1, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file1, (short)3);
ArrayList<DataNode> dns = cluster.getDataNodes();
assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
assertTrue("DN2 should be up", dns.get(1).isDatanodeUp());
assertTrue("DN3 should be up", dns.get(2).isDatanodeUp());
/*
* The metrics should confirm the volume failures.
*/
checkFailuresAtDataNode(dns.get(0), 1, true, dn1Vol1.getAbsolutePath());
checkFailuresAtDataNode(dns.get(1), 1, true, dn2Vol1.getAbsolutePath());
checkFailuresAtDataNode(dns.get(2), 0, true);
// Ensure we wait a sufficient amount of time
assert (WAIT_FOR_HEARTBEATS * 10) > WAIT_FOR_DEATH;
// Eventually the NN should report two volume failures
DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 2,
origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
checkAggregateFailuresAtNameNode(true, 2);
checkFailuresAtNameNode(dm, dns.get(0), true, dn1Vol1.getAbsolutePath());
checkFailuresAtNameNode(dm, dns.get(1), true, dn2Vol1.getAbsolutePath());
checkFailuresAtNameNode(dm, dns.get(2), true);
/*
* Now fail a volume on the third datanode. We should be able to get
* three replicas since we've already identified the other failures.
*/
DataNodeTestUtils.injectDataDirFailure(dn3Vol1);
Path file2 = new Path("/test2");
DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file2, (short)3);
assertTrue("DN3 should still be up", dns.get(2).isDatanodeUp());
checkFailuresAtDataNode(dns.get(2), 1, true, dn3Vol1.getAbsolutePath());
DataNodeTestUtils.triggerHeartbeat(dns.get(2));
checkFailuresAtNameNode(dm, dns.get(2), true, dn3Vol1.getAbsolutePath());
/*
* Once the datanodes have a chance to heartbeat their new capacity the
* total capacity should be down by three volumes (assuming the host
* did not grow or shrink the data volume while the test was running).
*/
dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 3,
origCapacity - (3*dnCapacity), WAIT_FOR_HEARTBEATS);
checkAggregateFailuresAtNameNode(true, 3);
checkFailuresAtNameNode(dm, dns.get(0), true, dn1Vol1.getAbsolutePath());
checkFailuresAtNameNode(dm, dns.get(1), true, dn2Vol1.getAbsolutePath());
checkFailuresAtNameNode(dm, dns.get(2), true, dn3Vol1.getAbsolutePath());
/*
* Now fail the 2nd volume on the 3rd datanode. All its volumes
* are now failed and so it should report two volume failures
* and that it's no longer up. Only wait for two replicas since
* we'll never get a third.
*/
DataNodeTestUtils.injectDataDirFailure(dn3Vol2);
Path file3 = new Path("/test3");
DFSTestUtil.createFile(fs, file3, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file3, (short)2);
// The DN should consider itself dead
DFSTestUtil.waitForDatanodeDeath(dns.get(2));
// And report two failed volumes
checkFailuresAtDataNode(dns.get(2), 2, true, dn3Vol1.getAbsolutePath(),
dn3Vol2.getAbsolutePath());
// The NN considers the DN dead
DFSTestUtil.waitForDatanodeStatus(dm, 2, 1, 2,
origCapacity - (4*dnCapacity), WAIT_FOR_HEARTBEATS);
checkAggregateFailuresAtNameNode(true, 2);
checkFailuresAtNameNode(dm, dns.get(0), true, dn1Vol1.getAbsolutePath());
checkFailuresAtNameNode(dm, dns.get(1), true, dn2Vol1.getAbsolutePath());
/*
* The datanode never tries to restore the failed volume, even if
* it's subsequently repaired, but it should see this volume on
* restart, so file creation should be able to succeed after
* restoring the data directories and restarting the datanodes.
*/
DataNodeTestUtils.restoreDataDirFromFailure(
dn1Vol1, dn2Vol1, dn3Vol1, dn3Vol2);
cluster.restartDataNodes();
cluster.waitActive();
Path file4 = new Path("/test4");
DFSTestUtil.createFile(fs, file4, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file4, (short)3);
/*
* Eventually the capacity should be restored to its original value,
* and the volume failure count should be reported as zero by
* both the metrics and the NN.
*/
DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 0, origCapacity,
WAIT_FOR_HEARTBEATS);
checkAggregateFailuresAtNameNode(true, 0);
dns = cluster.getDataNodes();
checkFailuresAtNameNode(dm, dns.get(0), true);
checkFailuresAtNameNode(dm, dns.get(1), true);
checkFailuresAtNameNode(dm, dns.get(2), true);
}
/**
* Test that the NN re-learns of volume failures after restart.
*/
@Test
public void testVolFailureStatsPreservedOnNNRestart() throws Exception {
// Bring up two more datanodes that can tolerate 1 failure
cluster.startDataNodes(conf, 2, true, null, null);
cluster.waitActive();
final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
).getDatanodeManager();
long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
// Fail the first volume on both datanodes (we have to keep the
// third healthy so one node in the pipeline will not fail).
File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);
Path file1 = new Path("/test1");
DFSTestUtil.createFile(fs, file1, 1024, (short)2, 1L);
DFSTestUtil.waitReplication(fs, file1, (short)2);
ArrayList<DataNode> dns = cluster.getDataNodes();
// The NN reports two volumes failures
DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 2,
origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
checkAggregateFailuresAtNameNode(true, 2);
checkFailuresAtNameNode(dm, dns.get(0), true, dn1Vol1.getAbsolutePath());
checkFailuresAtNameNode(dm, dns.get(1), true, dn2Vol1.getAbsolutePath());
// After restarting the NN, it still sees the two failures
cluster.restartNameNode(0);
cluster.waitActive();
DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 2,
origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
checkAggregateFailuresAtNameNode(true, 2);
checkFailuresAtNameNode(dm, dns.get(0), true, dn1Vol1.getAbsolutePath());
checkFailuresAtNameNode(dm, dns.get(1), true, dn2Vol1.getAbsolutePath());
}
@Test
public void testMultipleVolFailuresOnNode() throws Exception {
// Reinitialize the cluster, configured with 4 storage locations per DataNode
// and tolerating up to 2 failures.
tearDown();
initCluster(3, 4, 2);
// Calculate the total capacity of all the datanodes. Sleep for three seconds
// to be sure the datanodes have had a chance to heartbeat their capacities.
Thread.sleep(WAIT_FOR_HEARTBEATS);
DatanodeManager dm = cluster.getNamesystem().getBlockManager()
.getDatanodeManager();
long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
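// With 4 storage dirs per datanode, datanode i (zero-based) owns
// data(4*i+1) .. data(4*i+4).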
File dn1Vol1 = new File(dataDir, "data"+(4*0+1));
File dn1Vol2 = new File(dataDir, "data"+(4*0+2));
File dn2Vol1 = new File(dataDir, "data"+(4*1+1));
File dn2Vol2 = new File(dataDir, "data"+(4*1+2));
// Make the first two volume directories on the first two datanodes
// non-accessible.
DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn1Vol2, dn2Vol1, dn2Vol2);
// Create file1 and wait for 3 replicas (ie all DNs can still store a block).
// Then assert that all DNs are up, despite the volume failures.
Path file1 = new Path("/test1");
DFSTestUtil.createFile(fs, file1, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file1, (short)3);
ArrayList<DataNode> dns = cluster.getDataNodes();
assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
assertTrue("DN2 should be up", dns.get(1).isDatanodeUp());
assertTrue("DN3 should be up", dns.get(2).isDatanodeUp());
checkFailuresAtDataNode(dns.get(0), 1, true, dn1Vol1.getAbsolutePath(),
dn1Vol2.getAbsolutePath());
checkFailuresAtDataNode(dns.get(1), 1, true, dn2Vol1.getAbsolutePath(),
dn2Vol2.getAbsolutePath());
checkFailuresAtDataNode(dns.get(2), 0, true);
// Ensure we wait a sufficient amount of time
assert (WAIT_FOR_HEARTBEATS * 10) > WAIT_FOR_DEATH;
// Eventually the NN should report four volume failures
DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 4,
origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
checkAggregateFailuresAtNameNode(true, 4);
checkFailuresAtNameNode(dm, dns.get(0), true, dn1Vol1.getAbsolutePath(),
dn1Vol2.getAbsolutePath());
checkFailuresAtNameNode(dm, dns.get(1), true, dn2Vol1.getAbsolutePath(),
dn2Vol2.getAbsolutePath());
checkFailuresAtNameNode(dm, dns.get(2), true);
}
@Test
public void testDataNodeReconfigureWithVolumeFailures() throws Exception {
// Bring up two more datanodes
cluster.startDataNodes(conf, 2, true, null, null);
cluster.waitActive();
final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
).getDatanodeManager();
long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
// Fail the first volume on both datanodes (we have to keep the
// third healthy so one node in the pipeline will not fail).
File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
File dn1Vol2 = new File(dataDir, "data"+(2*0+2));
File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
File dn2Vol2 = new File(dataDir, "data"+(2*1+2));
DataNodeTestUtils.injectDataDirFailure(dn1Vol1);
DataNodeTestUtils.injectDataDirFailure(dn2Vol1);
Path file1 = new Path("/test1");
DFSTestUtil.createFile(fs, file1, 1024, (short)2, 1L);
DFSTestUtil.waitReplication(fs, file1, (short)2);
ArrayList<DataNode> dns = cluster.getDataNodes();
assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
assertTrue("DN2 should be up", dns.get(1).isDatanodeUp());
assertTrue("DN3 should be up", dns.get(2).isDatanodeUp());
checkFailuresAtDataNode(dns.get(0), 1, true, dn1Vol1.getAbsolutePath());
checkFailuresAtDataNode(dns.get(1), 1, true, dn2Vol1.getAbsolutePath());
checkFailuresAtDataNode(dns.get(2), 0, true);
// Ensure we wait a sufficient amount of time
assert (WAIT_FOR_HEARTBEATS * 10) > WAIT_FOR_DEATH;
// The NN reports two volume failures
DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 2,
origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
checkAggregateFailuresAtNameNode(true, 2);
checkFailuresAtNameNode(dm, dns.get(0), true, dn1Vol1.getAbsolutePath());
checkFailuresAtNameNode(dm, dns.get(1), true, dn2Vol1.getAbsolutePath());
// Reconfigure again to try to add back the failed volumes.
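// reconfigureDataNode() swallows the ReconfigurationException thrown when a
// still-failed volume is passed back in; the failure counts must survive.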
reconfigureDataNode(dns.get(0), dn1Vol1, dn1Vol2);
reconfigureDataNode(dns.get(1), dn2Vol1, dn2Vol2);
DataNodeTestUtils.triggerHeartbeat(dns.get(0));
DataNodeTestUtils.triggerHeartbeat(dns.get(1));
checkFailuresAtDataNode(dns.get(0), 1, false, dn1Vol1.getAbsolutePath());
checkFailuresAtDataNode(dns.get(1), 1, false, dn2Vol1.getAbsolutePath());
// Ensure we wait a sufficient amount of time.
assert (WAIT_FOR_HEARTBEATS * 10) > WAIT_FOR_DEATH;
// The NN reports two volume failures again.
DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 2,
origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
checkAggregateFailuresAtNameNode(false, 2);
checkFailuresAtNameNode(dm, dns.get(0), false, dn1Vol1.getAbsolutePath());
checkFailuresAtNameNode(dm, dns.get(1), false, dn2Vol1.getAbsolutePath());
// Reconfigure a third time with the failed volumes. Afterwards, we expect
// the same volume failures to be reported. (No double-counting.)
reconfigureDataNode(dns.get(0), dn1Vol1, dn1Vol2);
reconfigureDataNode(dns.get(1), dn2Vol1, dn2Vol2);
DataNodeTestUtils.triggerHeartbeat(dns.get(0));
DataNodeTestUtils.triggerHeartbeat(dns.get(1));
checkFailuresAtDataNode(dns.get(0), 1, false, dn1Vol1.getAbsolutePath());
checkFailuresAtDataNode(dns.get(1), 1, false, dn2Vol1.getAbsolutePath());
// Ensure we wait a sufficient amount of time.
assert (WAIT_FOR_HEARTBEATS * 10) > WAIT_FOR_DEATH;
// The NN reports two volume failures again.
DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 2,
origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
checkAggregateFailuresAtNameNode(false, 2);
checkFailuresAtNameNode(dm, dns.get(0), false, dn1Vol1.getAbsolutePath());
checkFailuresAtNameNode(dm, dns.get(1), false, dn2Vol1.getAbsolutePath());
// Replace failed volume with healthy volume and run reconfigure DataNode.
// The failed volume information should be cleared.
DataNodeTestUtils.restoreDataDirFromFailure(dn1Vol1, dn2Vol1);
reconfigureDataNode(dns.get(0), dn1Vol1, dn1Vol2);
reconfigureDataNode(dns.get(1), dn2Vol1, dn2Vol2);
DataNodeTestUtils.triggerHeartbeat(dns.get(0));
DataNodeTestUtils.triggerHeartbeat(dns.get(1));
checkFailuresAtDataNode(dns.get(0), 1, true);
checkFailuresAtDataNode(dns.get(1), 1, true);
DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 0,
origCapacity, WAIT_FOR_HEARTBEATS);
checkAggregateFailuresAtNameNode(true, 0);
checkFailuresAtNameNode(dm, dns.get(0), true);
checkFailuresAtNameNode(dm, dns.get(1), true);
}
/**
* Checks the NameNode for correct values of aggregate counters tracking failed
* volumes across all DataNodes.
*
* @param expectCapacityKnown if true, then expect that the capacities of the
* volumes were known before the failures, and therefore the lost capacity
* can be reported
* @param expectedVolumeFailuresTotal expected number of failed volumes
*/
private void checkAggregateFailuresAtNameNode(boolean expectCapacityKnown,
int expectedVolumeFailuresTotal) {
FSNamesystem ns = cluster.getNamesystem();
assertEquals(expectedVolumeFailuresTotal, ns.getVolumeFailuresTotal());
long expectedCapacityLost = getExpectedCapacityLost(expectCapacityKnown,
expectedVolumeFailuresTotal);
assertEquals(expectedCapacityLost, ns.getEstimatedCapacityLostTotal());
}
/**
* Checks a DataNode for correct reporting of failed volumes.
*
* @param dn DataNode to check
* @param expectedVolumeFailuresCounter metric counter value for
* VolumeFailures. The current implementation actually counts the number
* of failed disk checker cycles, which may be different from the length of
* expectedFailedVolumes if multiple disks fail in the same disk checker
* cycle
* @param expectCapacityKnown if true, then expect that the capacities of the
* volumes were known before the failures, and therefore the lost capacity
* can be reported
* @param expectedFailedVolumes expected locations of failed volumes
* @throws Exception if there is any failure
*/
private void checkFailuresAtDataNode(DataNode dn,
long expectedVolumeFailuresCounter, boolean expectCapacityKnown,
String... expectedFailedVolumes) throws Exception {
assertCounter("VolumeFailures", expectedVolumeFailuresCounter,
getMetrics(dn.getMetrics().name()));
FsDatasetSpi<?> fsd = dn.getFSDataset();
assertEquals(expectedFailedVolumes.length, fsd.getNumFailedVolumes());
assertArrayEquals(expectedFailedVolumes, fsd.getFailedStorageLocations());
if (expectedFailedVolumes.length > 0) {
assertTrue(fsd.getLastVolumeFailureDate() > 0);
long expectedCapacityLost = getExpectedCapacityLost(expectCapacityKnown,
expectedFailedVolumes.length);
assertEquals(expectedCapacityLost, fsd.getEstimatedCapacityLostTotal());
} else {
assertEquals(0, fsd.getLastVolumeFailureDate());
assertEquals(0, fsd.getEstimatedCapacityLostTotal());
}
}
/**
* Checks NameNode tracking of a particular DataNode for correct reporting of
* failed volumes.
*
* @param dm DatanodeManager to check
* @param dn DataNode to check
* @param expectCapacityKnown if true, then expect that the capacities of the
* volumes were known before the failures, and therefore the lost capacity
* can be reported
* @param expectedFailedVolumes expected locations of failed volumes
* @throws Exception if there is any failure
*/
private void checkFailuresAtNameNode(DatanodeManager dm, DataNode dn,
boolean expectCapacityKnown, String... expectedFailedVolumes)
throws Exception {
DatanodeDescriptor dd = cluster.getNamesystem().getBlockManager()
.getDatanodeManager().getDatanode(dn.getDatanodeId());
assertEquals(expectedFailedVolumes.length, dd.getVolumeFailures());
VolumeFailureSummary volumeFailureSummary = dd.getVolumeFailureSummary();
if (expectedFailedVolumes.length > 0) {
assertArrayEquals(expectedFailedVolumes, volumeFailureSummary
.getFailedStorageLocations());
assertTrue(volumeFailureSummary.getLastVolumeFailureDate() > 0);
long expectedCapacityLost = getExpectedCapacityLost(expectCapacityKnown,
expectedFailedVolumes.length);
assertEquals(expectedCapacityLost,
volumeFailureSummary.getEstimatedCapacityLostTotal());
} else {
assertNull(volumeFailureSummary);
}
}
/**
* Returns expected capacity lost for use in assertions. The return value is
* dependent on whether or not it is expected that the volume capacities were
* known prior to the failures.
*
* @param expectCapacityKnown if true, then expect that the capacities of the
* volumes were known before the failures, and therefore the lost capacity
* can be reported
* @param expectedVolumeFailuresTotal expected number of failed volumes
* @return estimated capacity lost in bytes
*/
private long getExpectedCapacityLost(boolean expectCapacityKnown,
int expectedVolumeFailuresTotal) {
return expectCapacityKnown ? expectedVolumeFailuresTotal * volumeCapacity :
0;
}
/**
* Initializes the cluster.
*
* @param numDataNodes number of datanodes
* @param storagesPerDatanode number of storage locations on each datanode
* @param failedVolumesTolerated number of acceptable volume failures
* @throws Exception if there is any failure
*/
private void initCluster(int numDataNodes, int storagesPerDatanode,
int failedVolumesTolerated) throws Exception {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
/*
* Lower the DN heartbeat, DF rate, and recheck interval to one second
* so state about failures and datanode death propagates faster.
*/
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
failedVolumesTolerated);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
.storagesPerDatanode(storagesPerDatanode).build();
cluster.waitActive();
fs = cluster.getFileSystem();
dataDir = cluster.getDataDirectory();
long dnCapacity = DFSTestUtil.getDatanodeCapacity(
cluster.getNamesystem().getBlockManager().getDatanodeManager(), 0);
volumeCapacity = dnCapacity / cluster.getStoragesPerDatanode();
}
/**
* Reconfigure a DataNode by setting a new list of volumes.
*
* @param dn DataNode to reconfigure
* @param newVols new volumes to configure
* @throws Exception if there is any failure
*/
private static void reconfigureDataNode(DataNode dn, File... newVols)
throws Exception {
StringBuilder dnNewDataDirs = new StringBuilder();
for (File newVol: newVols) {
if (dnNewDataDirs.length() > 0) {
dnNewDataDirs.append(',');
}
dnNewDataDirs.append(newVol.getAbsolutePath());
}
try {
dn.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
dnNewDataDirs.toString());
} catch (ReconfigurationException e) {
// This can be thrown if reconfiguration tries to use a failed volume.
// We need to swallow the exception, because some of our tests want to
// cover this case.
LOG.warn("Could not reconfigure DataNode.", e);
}
}
}
| 25,897 | 42.019934 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockScanner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND;
import static org.apache.hadoop.hdfs.server.datanode.BlockScanner.Conf.INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS;
import static org.apache.hadoop.hdfs.server.datanode.BlockScanner.Conf.INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER;
import static org.apache.hadoop.hdfs.server.datanode.BlockScanner.Conf.INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Semaphore;
import com.google.common.base.Supplier;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.VolumeScanner.ScanResultHandler;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
import org.apache.hadoop.hdfs.server.datanode.VolumeScanner.Statistics;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class TestBlockScanner {
public static final Logger LOG =
LoggerFactory.getLogger(TestBlockScanner.class);
@Before
public void before() {
BlockScanner.Conf.allowUnitTestSettings = true;
GenericTestUtils.setLogLevel(BlockScanner.LOG, Level.ALL);
GenericTestUtils.setLogLevel(VolumeScanner.LOG, Level.ALL);
GenericTestUtils.setLogLevel(FsVolumeImpl.LOG, Level.ALL);
}
private static void disableBlockScanner(Configuration conf) {
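// A volume bytes-per-second limit of 0 disables the block scanner entirely.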
conf.setLong(DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND, 0L);
}
private static class TestContext implements Closeable {
final int numNameServices;
final MiniDFSCluster cluster;
final DistributedFileSystem[] dfs;
final String[] bpids;
final DataNode datanode;
final BlockScanner blockScanner;
final FsDatasetSpi<? extends FsVolumeSpi> data;
final FsDatasetSpi.FsVolumeReferences volumes;
TestContext(Configuration conf, int numNameServices) throws Exception {
this.numNameServices = numNameServices;
MiniDFSCluster.Builder bld = new MiniDFSCluster.Builder(conf).
numDataNodes(1).
storagesPerDatanode(1);
if (numNameServices > 1) {
bld.nnTopology(MiniDFSNNTopology.
simpleFederatedTopology(numNameServices));
}
cluster = bld.build();
cluster.waitActive();
dfs = new DistributedFileSystem[numNameServices];
for (int i = 0; i < numNameServices; i++) {
dfs[i] = cluster.getFileSystem(i);
}
bpids = new String[numNameServices];
for (int i = 0; i < numNameServices; i++) {
bpids[i] = cluster.getNamesystem(i).getBlockPoolId();
}
datanode = cluster.getDataNodes().get(0);
blockScanner = datanode.getBlockScanner();
for (int i = 0; i < numNameServices; i++) {
dfs[i].mkdirs(new Path("/test"));
}
data = datanode.getFSDataset();
volumes = data.getFsVolumeReferences();
}
@Override
public void close() throws IOException {
volumes.close();
if (cluster != null) {
for (int i = 0; i < numNameServices; i++) {
dfs[i].delete(new Path("/test"), true);
}
cluster.shutdown();
}
}
public void createFiles(int nsIdx, int numFiles, int length)
throws Exception {
for (int blockIdx = 0; blockIdx < numFiles; blockIdx++) {
DFSTestUtil.createFile(dfs[nsIdx], getPath(blockIdx), length,
(short)1, 123L);
}
}
public Path getPath(int fileIdx) {
return new Path("/test/" + fileIdx);
}
public ExtendedBlock getFileBlock(int nsIdx, int fileIdx)
throws Exception {
return DFSTestUtil.getFirstBlock(dfs[nsIdx], getPath(fileIdx));
}
}
/**
* Test iterating through a bunch of blocks in a volume using a volume
* iterator.<p/>
*
* We will rewind the iterator when about halfway through the blocks.
*
* @param numFiles The number of files to create.
* @param maxStaleness The maximum staleness to allow with the iterator.
* @throws Exception
*/
private void testVolumeIteratorImpl(int numFiles,
long maxStaleness) throws Exception {
Configuration conf = new Configuration();
disableBlockScanner(conf);
TestContext ctx = new TestContext(conf, 1);
ctx.createFiles(0, numFiles, 1);
assertEquals(1, ctx.volumes.size());
FsVolumeSpi volume = ctx.volumes.get(0);
ExtendedBlock savedBlock = null, loadedBlock = null;
boolean testedRewind = false, testedSave = false, testedLoad = false;
int blocksProcessed = 0, savedBlocksProcessed = 0;
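// The loop below makes three passes over the volume: save the iterator
// cursor about 1/3 of the way in, rewind about 1/2 of the way in (restarting
// from the beginning), then reload the saved cursor about 2/3 of the way in
// and verify it resumes at the block recorded at save time.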
try {
List<BPOfferService> bpos = ctx.datanode.getAllBpOs();
assertEquals(1, bpos.size());
BlockIterator iter = volume.newBlockIterator(ctx.bpids[0], "test");
assertEquals(ctx.bpids[0], iter.getBlockPoolId());
iter.setMaxStalenessMs(maxStaleness);
while (true) {
HashSet<ExtendedBlock> blocks = new HashSet<ExtendedBlock>();
for (int blockIdx = 0; blockIdx < numFiles; blockIdx++) {
blocks.add(ctx.getFileBlock(0, blockIdx));
}
while (true) {
ExtendedBlock block = iter.nextBlock();
if (block == null) {
break;
}
blocksProcessed++;
LOG.info("BlockIterator for {} found block {}, blocksProcessed = {}",
volume, block, blocksProcessed);
if (testedSave && (savedBlock == null)) {
savedBlock = block;
}
if (testedLoad && (loadedBlock == null)) {
loadedBlock = block;
// The block that we get back right after loading the iterator
// should be the same block we got back right after saving
// the iterator.
assertEquals(savedBlock, loadedBlock);
}
boolean blockRemoved = blocks.remove(block);
assertTrue("Found unknown block " + block, blockRemoved);
if (blocksProcessed > (numFiles / 3)) {
if (!testedSave) {
LOG.info("Processed {} blocks out of {}. Saving iterator.",
blocksProcessed, numFiles);
iter.save();
testedSave = true;
savedBlocksProcessed = blocksProcessed;
}
}
if (blocksProcessed > (numFiles / 2)) {
if (!testedRewind) {
LOG.info("Processed {} blocks out of {}. Rewinding iterator.",
blocksProcessed, numFiles);
iter.rewind();
break;
}
}
if (blocksProcessed > ((2 * numFiles) / 3)) {
if (!testedLoad) {
LOG.info("Processed {} blocks out of {}. Loading iterator.",
blocksProcessed, numFiles);
iter = volume.loadBlockIterator(ctx.bpids[0], "test");
iter.setMaxStalenessMs(maxStaleness);
break;
}
}
}
if (!testedRewind) {
testedRewind = true;
blocksProcessed = 0;
LOG.info("Starting again at the beginning...");
continue;
}
if (!testedLoad) {
testedLoad = true;
blocksProcessed = savedBlocksProcessed;
LOG.info("Starting again at the load point...");
continue;
}
assertEquals(numFiles, blocksProcessed);
break;
}
} finally {
ctx.close();
}
}
@Test(timeout=60000)
public void testVolumeIteratorWithoutCaching() throws Exception {
testVolumeIteratorImpl(5, 0);
}
@Test(timeout=60000)
public void testVolumeIteratorWithCaching() throws Exception {
testVolumeIteratorImpl(600, 100);
}
@Test(timeout=60000)
public void testDisableVolumeScanner() throws Exception {
Configuration conf = new Configuration();
disableBlockScanner(conf);
TestContext ctx = new TestContext(conf, 1);
try {
Assert.assertFalse(ctx.datanode.getBlockScanner().isEnabled());
} finally {
ctx.close();
}
}
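  /**
   * A ScanResultHandler for tests: setup() blocks until Info#shouldRun is
   * set, handle() optionally gates on one Info#sem permit per block, and
   * the good and bad blocks seen are recorded for the tests to assert on.
   */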
public static class TestScanResultHandler extends ScanResultHandler {
static class Info {
boolean shouldRun = false;
final Set<ExtendedBlock> badBlocks = new HashSet<ExtendedBlock>();
final Set<ExtendedBlock> goodBlocks = new HashSet<ExtendedBlock>();
long blocksScanned = 0;
Semaphore sem = null;
@Override
public String toString() {
final StringBuilder bld = new StringBuilder();
bld.append("ScanResultHandler.Info{");
bld.append("shouldRun=").append(shouldRun).append(", ");
bld.append("blocksScanned=").append(blocksScanned).append(", ");
bld.append("sem#availablePermits=").append(sem.availablePermits()).
append(", ");
bld.append("badBlocks=").append(badBlocks).append(", ");
bld.append("goodBlocks=").append(goodBlocks);
bld.append("}");
return bld.toString();
}
}
private VolumeScanner scanner;
final static ConcurrentHashMap<String, Info> infos =
new ConcurrentHashMap<String, Info>();
static Info getInfo(FsVolumeSpi volume) {
Info newInfo = new Info();
Info prevInfo = infos.
putIfAbsent(volume.getStorageID(), newInfo);
return prevInfo == null ? newInfo : prevInfo;
}
@Override
public void setup(VolumeScanner scanner) {
this.scanner = scanner;
Info info = getInfo(scanner.volume);
LOG.info("about to start scanning.");
synchronized (info) {
while (!info.shouldRun) {
try {
info.wait();
          } catch (InterruptedException e) {
            // Ignore the interrupt and re-check shouldRun; the tests signal
            // startup through the shouldRun flag, not through interruption.
          }
}
}
LOG.info("starting scanning.");
}
@Override
public void handle(ExtendedBlock block, IOException e) {
LOG.info("handling block {} (exception {})", block, e);
Info info = getInfo(scanner.volume);
Semaphore sem;
synchronized (info) {
sem = info.sem;
}
if (sem != null) {
try {
sem.acquire();
} catch (InterruptedException ie) {
throw new RuntimeException("interrupted");
}
}
synchronized (info) {
if (!info.shouldRun) {
throw new RuntimeException("stopping volumescanner thread.");
}
if (e == null) {
info.goodBlocks.add(block);
} else {
info.badBlocks.add(block);
}
info.blocksScanned++;
}
}
}
private void testScanAllBlocksImpl(final boolean rescan) throws Exception {
Configuration conf = new Configuration();
conf.setLong(DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND, 1048576L);
if (rescan) {
conf.setLong(INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS, 100L);
} else {
conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L);
}
conf.set(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER,
TestScanResultHandler.class.getName());
final TestContext ctx = new TestContext(conf, 1);
final int NUM_EXPECTED_BLOCKS = 10;
ctx.createFiles(0, NUM_EXPECTED_BLOCKS, 1);
final Set<ExtendedBlock> expectedBlocks = new HashSet<ExtendedBlock>();
for (int i = 0; i < NUM_EXPECTED_BLOCKS; i++) {
expectedBlocks.add(ctx.getFileBlock(0, i));
}
TestScanResultHandler.Info info =
TestScanResultHandler.getInfo(ctx.volumes.get(0));
synchronized (info) {
info.shouldRun = true;
info.notify();
}
GenericTestUtils.waitFor(new Supplier<Boolean>(){
@Override
public Boolean get() {
TestScanResultHandler.Info info =
TestScanResultHandler.getInfo(ctx.volumes.get(0));
int numFoundBlocks = 0;
StringBuilder foundBlocksBld = new StringBuilder();
String prefix = "";
synchronized (info) {
for (ExtendedBlock block : info.goodBlocks) {
assertTrue(expectedBlocks.contains(block));
numFoundBlocks++;
foundBlocksBld.append(prefix).append(block);
prefix = ", ";
}
LOG.info("numFoundBlocks = {}. blocksScanned = {}. Found blocks {}",
numFoundBlocks, info.blocksScanned, foundBlocksBld.toString());
if (rescan) {
return (numFoundBlocks == NUM_EXPECTED_BLOCKS) &&
(info.blocksScanned >= 2 * NUM_EXPECTED_BLOCKS);
} else {
return numFoundBlocks == NUM_EXPECTED_BLOCKS;
}
}
}
}, 10, 60000);
if (!rescan) {
synchronized (info) {
assertEquals(NUM_EXPECTED_BLOCKS, info.blocksScanned);
}
Statistics stats = ctx.blockScanner.getVolumeStats(
ctx.volumes.get(0).getStorageID());
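      // Each 1-byte file contributes 5 scanned bytes: 1 byte of data plus
      // 4 bytes of checksum.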
assertEquals(5 * NUM_EXPECTED_BLOCKS, stats.bytesScannedInPastHour);
assertEquals(NUM_EXPECTED_BLOCKS, stats.blocksScannedSinceRestart);
assertEquals(NUM_EXPECTED_BLOCKS, stats.blocksScannedInCurrentPeriod);
assertEquals(0, stats.scanErrorsSinceRestart);
assertEquals(1, stats.scansSinceRestart);
}
ctx.close();
}
/**
* Test scanning all blocks. Set the scan period high enough that
* we shouldn't rescan any block during this test.
*/
@Test(timeout=60000)
public void testScanAllBlocksNoRescan() throws Exception {
testScanAllBlocksImpl(false);
}
/**
   * Test scanning all blocks. Set the scan period low enough that
   * we should rescan all blocks at least twice during this test.
*/
@Test(timeout=60000)
public void testScanAllBlocksWithRescan() throws Exception {
testScanAllBlocksImpl(true);
}
/**
* Test that we don't scan too many blocks per second.
*/
@Test(timeout=120000)
public void testScanRateLimit() throws Exception {
Configuration conf = new Configuration();
// Limit scan bytes per second dramatically
conf.setLong(DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND, 4096L);
// Scan continuously
conf.setLong(INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS, 1L);
conf.set(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER,
TestScanResultHandler.class.getName());
final TestContext ctx = new TestContext(conf, 1);
final int NUM_EXPECTED_BLOCKS = 5;
ctx.createFiles(0, NUM_EXPECTED_BLOCKS, 4096);
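    // Each block holds 4096 bytes and the scanner is limited to 4096 bytes
    // per second, so we expect at most roughly one block scanned per second.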
final TestScanResultHandler.Info info =
TestScanResultHandler.getInfo(ctx.volumes.get(0));
long startMs = Time.monotonicNow();
synchronized (info) {
info.shouldRun = true;
info.notify();
}
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
synchronized (info) {
return info.blocksScanned > 0;
}
}
}, 1, 30000);
Thread.sleep(2000);
synchronized (info) {
long endMs = Time.monotonicNow();
// Should scan no more than one block a second.
long seconds = ((endMs + 999 - startMs) / 1000);
long maxBlocksScanned = seconds * 1;
assertTrue("The number of blocks scanned is too large. Scanned " +
info.blocksScanned + " blocks; only expected to scan at most " +
maxBlocksScanned + " in " + seconds + " seconds.",
info.blocksScanned <= maxBlocksScanned);
}
ctx.close();
}
@Test(timeout=120000)
public void testCorruptBlockHandling() throws Exception {
Configuration conf = new Configuration();
conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L);
conf.set(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER,
TestScanResultHandler.class.getName());
final TestContext ctx = new TestContext(conf, 1);
final int NUM_EXPECTED_BLOCKS = 5;
final int CORRUPT_INDEX = 3;
ctx.createFiles(0, NUM_EXPECTED_BLOCKS, 4);
ExtendedBlock badBlock = ctx.getFileBlock(0, CORRUPT_INDEX);
ctx.cluster.corruptBlockOnDataNodes(badBlock);
final TestScanResultHandler.Info info =
TestScanResultHandler.getInfo(ctx.volumes.get(0));
synchronized (info) {
info.shouldRun = true;
info.notify();
}
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
synchronized (info) {
return info.blocksScanned == NUM_EXPECTED_BLOCKS;
}
}
}, 3, 30000);
synchronized (info) {
assertTrue(info.badBlocks.contains(badBlock));
for (int i = 0; i < NUM_EXPECTED_BLOCKS; i++) {
if (i != CORRUPT_INDEX) {
ExtendedBlock block = ctx.getFileBlock(0, i);
assertTrue(info.goodBlocks.contains(block));
}
}
}
ctx.close();
}
/**
* Test that we save the scan cursor when shutting down the datanode, and
* restart scanning from there when the datanode is restarted.
*/
@Test(timeout=120000)
public void testDatanodeCursor() throws Exception {
Configuration conf = new Configuration();
conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L);
conf.set(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER,
TestScanResultHandler.class.getName());
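    // A save interval of 0 makes the scanner persist its cursor after every
    // block, so the restart below can resume exactly where it stopped.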
conf.setLong(INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS, 0L);
final TestContext ctx = new TestContext(conf, 1);
final int NUM_EXPECTED_BLOCKS = 10;
ctx.createFiles(0, NUM_EXPECTED_BLOCKS, 1);
final TestScanResultHandler.Info info =
TestScanResultHandler.getInfo(ctx.volumes.get(0));
synchronized (info) {
info.sem = new Semaphore(5);
info.shouldRun = true;
info.notify();
}
// Scan the first 5 blocks
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
synchronized (info) {
return info.blocksScanned == 5;
}
}
}, 3, 30000);
synchronized (info) {
assertEquals(5, info.goodBlocks.size());
assertEquals(5, info.blocksScanned);
info.shouldRun = false;
}
ctx.datanode.shutdown();
String vPath = ctx.volumes.get(0).getBasePath();
File cursorPath = new File(new File(new File(vPath, "current"),
ctx.bpids[0]), "scanner.cursor");
assertTrue("Failed to find cursor save file in " +
cursorPath.getAbsolutePath(), cursorPath.exists());
Set<ExtendedBlock> prevGoodBlocks = new HashSet<ExtendedBlock>();
synchronized (info) {
info.sem = new Semaphore(4);
prevGoodBlocks.addAll(info.goodBlocks);
info.goodBlocks.clear();
}
// The block that we were scanning when we shut down the DN won't get
// recorded.
// After restarting the datanode, we should scan the next 4 blocks.
ctx.cluster.restartDataNode(0);
synchronized (info) {
info.shouldRun = true;
info.notify();
}
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
synchronized (info) {
if (info.blocksScanned != 9) {
LOG.info("Waiting for blocksScanned to reach 9. It is at {}",
info.blocksScanned);
}
return info.blocksScanned == 9;
}
}
}, 3, 30000);
synchronized (info) {
assertEquals(4, info.goodBlocks.size());
info.goodBlocks.addAll(prevGoodBlocks);
assertEquals(9, info.goodBlocks.size());
assertEquals(9, info.blocksScanned);
}
ctx.datanode.shutdown();
// After restarting the datanode, we should not scan any more blocks.
// This is because we reached the end of the block pool earlier, and
// the scan period is much, much longer than the test time.
synchronized (info) {
info.sem = null;
info.shouldRun = false;
info.goodBlocks.clear();
}
ctx.cluster.restartDataNode(0);
synchronized (info) {
info.shouldRun = true;
info.notify();
}
Thread.sleep(3000);
synchronized (info) {
assertTrue(info.goodBlocks.isEmpty());
}
ctx.close();
}
@Test(timeout=120000)
public void testMultipleBlockPoolScanning() throws Exception {
Configuration conf = new Configuration();
conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L);
conf.set(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER,
TestScanResultHandler.class.getName());
final TestContext ctx = new TestContext(conf, 3);
// We scan 5 bytes per file (1 byte in file, 4 bytes of checksum)
final int BYTES_SCANNED_PER_FILE = 5;
final int NUM_FILES[] = new int[] { 1, 5, 10 };
int TOTAL_FILES = 0;
for (int i = 0; i < NUM_FILES.length; i++) {
TOTAL_FILES += NUM_FILES[i];
}
    // Create one batch of files in each of the three namespaces/block pools.
    ctx.createFiles(0, NUM_FILES[0], 1);
    ctx.createFiles(1, NUM_FILES[1], 1);
    ctx.createFiles(2, NUM_FILES[2], 1);
// start scanning
final TestScanResultHandler.Info info =
TestScanResultHandler.getInfo(ctx.volumes.get(0));
synchronized (info) {
info.shouldRun = true;
info.notify();
}
// Wait for all the block pools to be scanned.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
synchronized (info) {
Statistics stats = ctx.blockScanner.getVolumeStats(
ctx.volumes.get(0).getStorageID());
if (stats.scansSinceRestart < 3) {
LOG.info("Waiting for scansSinceRestart to reach 3 (it is {})",
stats.scansSinceRestart);
return false;
}
if (!stats.eof) {
LOG.info("Waiting for eof.");
return false;
}
return true;
}
}
}, 3, 30000);
Statistics stats = ctx.blockScanner.getVolumeStats(
ctx.volumes.get(0).getStorageID());
assertEquals(TOTAL_FILES, stats.blocksScannedSinceRestart);
assertEquals(BYTES_SCANNED_PER_FILE * TOTAL_FILES,
stats.bytesScannedInPastHour);
ctx.close();
}
@Test(timeout=120000)
public void testNextSorted() throws Exception {
List<String> arr = new LinkedList<String>();
arr.add("1");
arr.add("3");
arr.add("5");
arr.add("7");
Assert.assertEquals("3", FsVolumeImpl.nextSorted(arr, "2"));
Assert.assertEquals("3", FsVolumeImpl.nextSorted(arr, "1"));
Assert.assertEquals("1", FsVolumeImpl.nextSorted(arr, ""));
Assert.assertEquals("1", FsVolumeImpl.nextSorted(arr, null));
Assert.assertEquals(null, FsVolumeImpl.nextSorted(arr, "9"));
}
@Test(timeout=120000)
public void testCalculateNeededBytesPerSec() throws Exception {
// If we didn't check anything the last hour, we should scan now.
Assert.assertTrue(
VolumeScanner.calculateShouldScan("test", 100, 0, 0, 60));
    // If we scanned an average of 101 bytes/s during the last hour, we have
    // already exceeded the 100 bytes/s target, so stop scanning now.
Assert.assertFalse(VolumeScanner.
calculateShouldScan("test", 100, 101 * 3600, 1000, 5000));
// Target is 1 byte / s, but we didn't scan anything in the last minute.
// Should scan now.
Assert.assertTrue(VolumeScanner.
calculateShouldScan("test", 1, 3540, 0, 60));
    // Target is 100000 bytes / s, but we didn't scan anything in the last
    // minute. Should scan now.
Assert.assertTrue(VolumeScanner.
calculateShouldScan("test", 100000L, 354000000L, 0, 60));
Assert.assertFalse(VolumeScanner.
calculateShouldScan("test", 100000L, 365000000L, 0, 60));
}
/**
* Test that we can mark certain blocks as suspect, and get them quickly
* rescanned that way. See HDFS-7686 and HDFS-7548.
*/
@Test(timeout=120000)
public void testMarkSuspectBlock() throws Exception {
Configuration conf = new Configuration();
// Set a really long scan period.
conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L);
conf.set(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER,
TestScanResultHandler.class.getName());
conf.setLong(INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS, 0L);
final TestContext ctx = new TestContext(conf, 1);
final int NUM_EXPECTED_BLOCKS = 10;
ctx.createFiles(0, NUM_EXPECTED_BLOCKS, 1);
final TestScanResultHandler.Info info =
TestScanResultHandler.getInfo(ctx.volumes.get(0));
String storageID = ctx.volumes.get(0).getStorageID();
synchronized (info) {
info.sem = new Semaphore(4);
info.shouldRun = true;
info.notify();
}
// Scan the first 4 blocks
LOG.info("Waiting for the first 4 blocks to be scanned.");
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
synchronized (info) {
if (info.blocksScanned >= 4) {
LOG.info("info = {}. blockScanned has now reached 4.", info);
return true;
} else {
LOG.info("info = {}. Waiting for blockScanned to reach 4.", info);
return false;
}
}
}
}, 50, 30000);
// We should have scanned 4 blocks
synchronized (info) {
assertEquals("Expected 4 good blocks.", 4, info.goodBlocks.size());
info.goodBlocks.clear();
assertEquals("Expected 4 blocksScanned", 4, info.blocksScanned);
assertEquals("Did not expect bad blocks.", 0, info.badBlocks.size());
info.blocksScanned = 0;
}
ExtendedBlock first = ctx.getFileBlock(0, 0);
ctx.datanode.getBlockScanner().markSuspectBlock(storageID, first);
// When we increment the semaphore, the TestScanResultHandler will finish
// adding the block that it was scanning previously (the 5th block).
// We increment the semaphore twice so that the handler will also
// get a chance to see the suspect block which we just requested the
// VolumeScanner to process.
info.sem.release(2);
LOG.info("Waiting for 2 more blocks to be scanned.");
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
synchronized (info) {
if (info.blocksScanned >= 2) {
LOG.info("info = {}. blockScanned has now reached 2.", info);
return true;
} else {
LOG.info("info = {}. Waiting for blockScanned to reach 2.", info);
return false;
}
}
}
}, 50, 30000);
synchronized (info) {
assertTrue("Expected block " + first + " to have been scanned.",
info.goodBlocks.contains(first));
assertEquals(2, info.goodBlocks.size());
info.goodBlocks.clear();
assertEquals("Did not expect bad blocks.", 0, info.badBlocks.size());
assertEquals(2, info.blocksScanned);
info.blocksScanned = 0;
}
// Re-mark the same block as suspect.
ctx.datanode.getBlockScanner().markSuspectBlock(storageID, first);
info.sem.release(10);
LOG.info("Waiting for 5 more blocks to be scanned.");
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
synchronized (info) {
if (info.blocksScanned >= 5) {
LOG.info("info = {}. blockScanned has now reached 5.", info);
return true;
} else {
LOG.info("info = {}. Waiting for blockScanned to reach 5.", info);
return false;
}
}
}
}, 50, 30000);
synchronized (info) {
assertEquals(5, info.goodBlocks.size());
assertEquals(0, info.badBlocks.size());
assertEquals(5, info.blocksScanned);
// We should not have rescanned the "suspect block",
// because it was recently rescanned by the suspect block system.
// This is a test of the "suspect block" rate limiting.
Assert.assertFalse("We should not " +
"have rescanned block " + first + ", because it should have been " +
"in recentSuspectBlocks.", info.goodBlocks.contains(first));
info.blocksScanned = 0;
}
    ctx.close();
  }
}
| 29,451 | 35.093137 | 122 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.util.Collection;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClientAdapter;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
/** Test transferring RBW between datanodes */
public class TestTransferRbw {
private static final Log LOG = LogFactory.getLog(TestTransferRbw.class);
{
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
}
private static final Random RAN = new Random();
private static final short REPLICATION = (short)1;
private static ReplicaBeingWritten getRbw(final DataNode datanode,
String bpid) throws InterruptedException {
return (ReplicaBeingWritten)getReplica(datanode, bpid, ReplicaState.RBW);
}
private static ReplicaInPipeline getReplica(final DataNode datanode,
final String bpid, final ReplicaState expectedState) throws InterruptedException {
final Collection<ReplicaInfo> replicas = FsDatasetTestUtil.getReplicas(
datanode.getFSDataset(), bpid);
for(int i = 0; i < 5 && replicas.size() == 0; i++) {
LOG.info("wait since replicas.size() == 0; i=" + i);
Thread.sleep(1000);
}
Assert.assertEquals(1, replicas.size());
final ReplicaInfo r = replicas.iterator().next();
Assert.assertEquals(expectedState, r.getState());
return (ReplicaInPipeline)r;
}
@Test
public void testTransferRbw() throws Exception {
final HdfsConfiguration conf = new HdfsConfiguration();
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
).numDataNodes(REPLICATION).build();
try {
cluster.waitActive();
final DistributedFileSystem fs = cluster.getFileSystem();
//create a file, write some data and leave it open.
final Path p = new Path("/foo");
final int size = (1 << 16) + RAN.nextInt(1 << 16);
LOG.info("size = " + size);
final FSDataOutputStream out = fs.create(p, REPLICATION);
final byte[] bytes = new byte[1024];
for(int remaining = size; remaining > 0; ) {
RAN.nextBytes(bytes);
final int len = bytes.length < remaining? bytes.length: remaining;
out.write(bytes, 0, len);
out.hflush();
remaining -= len;
}
//get the RBW
final ReplicaBeingWritten oldrbw;
final DataNode newnode;
final DatanodeInfo newnodeinfo;
final String bpid = cluster.getNamesystem().getBlockPoolId();
{
final DataNode oldnode = cluster.getDataNodes().get(0);
oldrbw = getRbw(oldnode, bpid);
LOG.info("oldrbw = " + oldrbw);
//add a datanode
cluster.startDataNodes(conf, 1, true, null, null);
newnode = cluster.getDataNodes().get(REPLICATION);
final DatanodeInfo oldnodeinfo;
{
        final DatanodeInfo[] datanodeInfos = cluster.getNameNodeRpc(
        ).getDatanodeReport(DatanodeReportType.LIVE);
        Assert.assertEquals(2, datanodeInfos.length);
        // Find the index of the newly added datanode in the report.
        int i = 0;
        for(DatanodeRegistration dnReg = newnode.getDNRegistrationForBP(bpid);
            i < datanodeInfos.length && !datanodeInfos[i].equals(dnReg); i++);
        Assert.assertTrue(i < datanodeInfos.length);
        newnodeinfo = datanodeInfos[i];
        oldnodeinfo = datanodeInfos[1 - i];
}
//transfer RBW
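      //send the acked length: for a replica still being written, only the
      //acked bytes are guaranteed visible (compare getVisibleLength() below)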
final ExtendedBlock b = new ExtendedBlock(bpid, oldrbw.getBlockId(), oldrbw.getBytesAcked(),
oldrbw.getGenerationStamp());
final BlockOpResponseProto s = DFSTestUtil.transferRbw(
b, DFSClientAdapter.getDFSClient(fs), oldnodeinfo, newnodeinfo);
Assert.assertEquals(Status.SUCCESS, s.getStatus());
}
//check new rbw
final ReplicaBeingWritten newrbw = getRbw(newnode, bpid);
LOG.info("newrbw = " + newrbw);
Assert.assertEquals(oldrbw.getBlockId(), newrbw.getBlockId());
Assert.assertEquals(oldrbw.getGenerationStamp(), newrbw.getGenerationStamp());
Assert.assertEquals(oldrbw.getVisibleLength(), newrbw.getVisibleLength());
LOG.info("DONE");
} finally {
cluster.shutdown();
}
}
}
| 5,951 | 40.048276 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import java.io.FileDescriptor;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.channels.ClosedChannelException;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.management.NotCompliantMBeanException;
import javax.management.ObjectName;
import javax.management.StandardMBean;
import org.apache.commons.lang.ArrayUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetricHelper;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.DataChecksum;
/**
* This class implements a simulated FSDataset.
*
* Blocks that are created are recorded but their data (plus their CRCs) are
* discarded.
* Fixed data is returned when blocks are read; a null CRC meta file is
* created for such data.
*
 * This FSDataset does not remember any block information across its
 * restarts; it does, however, offer an operation to inject blocks
 * (see TestInjectionForSimulatedStorage for a usage example of injection).
*
* Note the synchronization is coarse grained - it is at each method.
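 *
 * A minimal usage sketch (cluster setup details elided; the factory hook
 * below is the supported entry point):
 * <pre>{@code
 * Configuration conf = new Configuration();
 * SimulatedFSDataset.setFactory(conf); // DataNodes now use simulated storage
 * MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
 * }</pre>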
*/
public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
public final static int BYTE_MASK = 0xff;
static class Factory extends FsDatasetSpi.Factory<SimulatedFSDataset> {
@Override
public SimulatedFSDataset newInstance(DataNode datanode,
DataStorage storage, Configuration conf) throws IOException {
return new SimulatedFSDataset(datanode, storage, conf);
}
@Override
public boolean isSimulated() {
return true;
}
}
public static void setFactory(Configuration conf) {
conf.set(DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
Factory.class.getName());
}
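  // Example: a block with id 0x102 has first byte 0x102 & 0xff = 0x02, so
  // simulatedByte(b, 3) yields 0x05; reads are thus reproducible without
  // storing any real data.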
public static byte simulatedByte(Block b, long offsetInBlk) {
byte firstByte = (byte) (b.getBlockId() & BYTE_MASK);
return (byte) ((firstByte + offsetInBlk) & BYTE_MASK);
}
public static final String CONFIG_PROPERTY_CAPACITY =
"dfs.datanode.simulateddatastorage.capacity";
  public static final long DEFAULT_CAPACITY = 2L<<40; // 2 terabytes
public static final String CONFIG_PROPERTY_STATE =
"dfs.datanode.simulateddatastorage.state";
private static final DatanodeStorage.State DEFAULT_STATE =
DatanodeStorage.State.NORMAL;
static final byte[] nullCrcFileData;
static {
DataChecksum checksum = DataChecksum.newDataChecksum(
DataChecksum.Type.NULL, 16*1024 );
byte[] nullCrcHeader = checksum.getHeader();
nullCrcFileData = new byte[2 + nullCrcHeader.length];
nullCrcFileData[0] = (byte) ((BlockMetadataHeader.VERSION >>> 8) & 0xff);
nullCrcFileData[1] = (byte) (BlockMetadataHeader.VERSION & 0xff);
for (int i = 0; i < nullCrcHeader.length; i++) {
nullCrcFileData[i+2] = nullCrcHeader[i];
}
}
// information about a single block
private class BInfo implements ReplicaInPipelineInterface {
final Block theBlock;
private boolean finalized = false; // if not finalized => ongoing creation
SimulatedOutputStream oStream = null;
private long bytesAcked;
private long bytesRcvd;
private boolean pinned = false;
BInfo(String bpid, Block b, boolean forWriting) throws IOException {
theBlock = new Block(b);
if (theBlock.getNumBytes() < 0) {
theBlock.setNumBytes(0);
}
if (!storage.alloc(bpid, theBlock.getNumBytes())) {
        // We allocated the expected length; the actual length may be
        // greater - we find out at finalize.
DataNode.LOG.warn("Lack of free storage on a block alloc");
throw new IOException("Creating block, no free space available");
}
if (forWriting) {
finalized = false;
oStream = new SimulatedOutputStream();
} else {
finalized = true;
oStream = null;
}
}
@Override
public String getStorageUuid() {
return storage.getStorageUuid();
}
@Override
synchronized public long getGenerationStamp() {
return theBlock.getGenerationStamp();
}
@Override
synchronized public long getNumBytes() {
if (!finalized) {
return bytesRcvd;
} else {
return theBlock.getNumBytes();
}
}
@Override
synchronized public void setNumBytes(long length) {
if (!finalized) {
bytesRcvd = length;
} else {
theBlock.setNumBytes(length);
}
}
synchronized SimulatedInputStream getIStream() {
if (!finalized) {
// throw new IOException("Trying to read an unfinalized block");
return new SimulatedInputStream(oStream.getLength(), theBlock);
} else {
return new SimulatedInputStream(theBlock.getNumBytes(), theBlock);
}
}
synchronized void finalizeBlock(String bpid, long finalSize)
throws IOException {
if (finalized) {
throw new IOException(
"Finalizing a block that has already been finalized" +
theBlock.getBlockId());
}
if (oStream == null) {
DataNode.LOG.error("Null oStream on unfinalized block - bug");
throw new IOException("Unexpected error on finalize");
}
if (oStream.getLength() != finalSize) {
DataNode.LOG.warn("Size passed to finalize (" + finalSize +
")does not match what was written:" + oStream.getLength());
throw new IOException(
"Size passed to finalize does not match the amount of data written");
}
// We had allocated the expected length when block was created;
// adjust if necessary
long extraLen = finalSize - theBlock.getNumBytes();
if (extraLen > 0) {
if (!storage.alloc(bpid,extraLen)) {
DataNode.LOG.warn("Lack of free storage on a block alloc");
throw new IOException("Creating block, no free space available");
}
} else {
storage.free(bpid, -extraLen);
}
theBlock.setNumBytes(finalSize);
finalized = true;
oStream = null;
return;
}
synchronized void unfinalizeBlock() throws IOException {
if (!finalized) {
throw new IOException("Unfinalized a block that's not finalized "
+ theBlock);
}
finalized = false;
oStream = new SimulatedOutputStream();
long blockLen = theBlock.getNumBytes();
oStream.setLength(blockLen);
bytesRcvd = blockLen;
bytesAcked = blockLen;
}
SimulatedInputStream getMetaIStream() {
return new SimulatedInputStream(nullCrcFileData);
}
synchronized boolean isFinalized() {
return finalized;
}
@Override
synchronized public ReplicaOutputStreams createStreams(boolean isCreate,
DataChecksum requestedChecksum) throws IOException {
if (finalized) {
throw new IOException("Trying to write to a finalized replica "
+ theBlock);
} else {
SimulatedOutputStream crcStream = new SimulatedOutputStream();
return new ReplicaOutputStreams(oStream, crcStream, requestedChecksum,
volume.isTransientStorage());
}
}
@Override
public OutputStream createRestartMetaStream() throws IOException {
return new SimulatedOutputStream();
}
@Override
synchronized public long getBlockId() {
return theBlock.getBlockId();
}
@Override
synchronized public long getVisibleLength() {
return getBytesAcked();
}
@Override
public ReplicaState getState() {
return finalized ? ReplicaState.FINALIZED : ReplicaState.RBW;
}
@Override
synchronized public long getBytesAcked() {
if (finalized) {
return theBlock.getNumBytes();
} else {
return bytesAcked;
}
}
@Override
synchronized public void setBytesAcked(long bytesAcked) {
if (!finalized) {
this.bytesAcked = bytesAcked;
}
}
@Override
public void releaseAllBytesReserved() {
}
@Override
synchronized public long getBytesOnDisk() {
if (finalized) {
return theBlock.getNumBytes();
} else {
return oStream.getLength();
}
}
@Override
public void setLastChecksumAndDataLen(long dataLength, byte[] lastChecksum) {
oStream.setLength(dataLength);
}
@Override
public ChunkChecksum getLastChecksumAndDataLen() {
return new ChunkChecksum(oStream.getLength(), null);
}
@Override
public boolean isOnTransientStorage() {
return false;
}
}
/**
* Class is used for tracking block pool storage utilization similar
* to {@link BlockPoolSlice}
*/
private static class SimulatedBPStorage {
private long used; // in bytes
long getUsed() {
return used;
}
void alloc(long amount) {
used += amount;
}
void free(long amount) {
used -= amount;
}
SimulatedBPStorage() {
used = 0;
}
}
/**
* Class used for tracking datanode level storage utilization similar
* to {@link FSVolumeSet}
*/
private static class SimulatedStorage {
private final Map<String, SimulatedBPStorage> map =
new HashMap<String, SimulatedBPStorage>();
private final long capacity; // in bytes
private final DatanodeStorage dnStorage;
synchronized long getFree() {
return capacity - getUsed();
}
long getCapacity() {
return capacity;
}
synchronized long getUsed() {
long used = 0;
for (SimulatedBPStorage bpStorage : map.values()) {
used += bpStorage.getUsed();
}
return used;
}
synchronized long getBlockPoolUsed(String bpid) throws IOException {
return getBPStorage(bpid).getUsed();
}
int getNumFailedVolumes() {
return 0;
}
synchronized boolean alloc(String bpid, long amount) throws IOException {
if (getFree() >= amount) {
getBPStorage(bpid).alloc(amount);
return true;
}
return false;
}
synchronized void free(String bpid, long amount) throws IOException {
getBPStorage(bpid).free(amount);
}
SimulatedStorage(long cap, DatanodeStorage.State state) {
capacity = cap;
dnStorage = new DatanodeStorage(
"SimulatedStorage-" + DatanodeStorage.generateUuid(),
state, StorageType.DEFAULT);
}
synchronized void addBlockPool(String bpid) {
SimulatedBPStorage bpStorage = map.get(bpid);
if (bpStorage != null) {
return;
}
map.put(bpid, new SimulatedBPStorage());
}
synchronized void removeBlockPool(String bpid) {
map.remove(bpid);
}
private SimulatedBPStorage getBPStorage(String bpid) throws IOException {
SimulatedBPStorage bpStorage = map.get(bpid);
if (bpStorage == null) {
throw new IOException("block pool " + bpid + " not found");
}
return bpStorage;
}
String getStorageUuid() {
return dnStorage.getStorageID();
}
DatanodeStorage getDnStorage() {
return dnStorage;
}
synchronized StorageReport getStorageReport(String bpid) {
return new StorageReport(dnStorage,
false, getCapacity(), getUsed(), getFree(),
map.get(bpid).getUsed());
}
}
static class SimulatedVolume implements FsVolumeSpi {
private final SimulatedStorage storage;
SimulatedVolume(final SimulatedStorage storage) {
this.storage = storage;
}
@Override
public FsVolumeReference obtainReference() throws ClosedChannelException {
return null;
}
@Override
public String getStorageID() {
return storage.getStorageUuid();
}
@Override
public String[] getBlockPoolList() {
return new String[0];
}
@Override
public long getAvailable() throws IOException {
return storage.getCapacity() - storage.getUsed();
}
@Override
public String getBasePath() {
return null;
}
@Override
public String getPath(String bpid) throws IOException {
return null;
}
@Override
public File getFinalizedDir(String bpid) throws IOException {
return null;
}
@Override
public StorageType getStorageType() {
return null;
}
@Override
public boolean isTransientStorage() {
return false;
}
@Override
public void reserveSpaceForRbw(long bytesToReserve) {
}
@Override
public void releaseLockedMemory(long bytesToRelease) {
}
@Override
public void releaseReservedSpace(long bytesToRelease) {
}
@Override
public BlockIterator newBlockIterator(String bpid, String name) {
throw new UnsupportedOperationException();
}
@Override
public BlockIterator loadBlockIterator(String bpid, String name)
throws IOException {
throw new UnsupportedOperationException();
}
@Override
public FsDatasetSpi getDataset() {
throw new UnsupportedOperationException();
}
}
private final Map<String, Map<Block, BInfo>> blockMap
= new HashMap<String, Map<Block,BInfo>>();
private final SimulatedStorage storage;
private final SimulatedVolume volume;
private final String datanodeUuid;
private final DataNode datanode;
public SimulatedFSDataset(DataStorage storage, Configuration conf) {
this(null, storage, conf);
}
public SimulatedFSDataset(DataNode datanode, DataStorage storage, Configuration conf) {
this.datanode = datanode;
if (storage != null) {
for (int i = 0; i < storage.getNumStorageDirs(); ++i) {
storage.createStorageID(storage.getStorageDir(i), false);
}
this.datanodeUuid = storage.getDatanodeUuid();
} else {
this.datanodeUuid = "SimulatedDatanode-" + DataNode.generateUuid();
}
registerMBean(datanodeUuid);
this.storage = new SimulatedStorage(
conf.getLong(CONFIG_PROPERTY_CAPACITY, DEFAULT_CAPACITY),
conf.getEnum(CONFIG_PROPERTY_STATE, DEFAULT_STATE));
this.volume = new SimulatedVolume(this.storage);
}
public synchronized void injectBlocks(String bpid,
Iterable<? extends Block> injectBlocks) throws IOException {
ExtendedBlock blk = new ExtendedBlock();
if (injectBlocks != null) {
for (Block b: injectBlocks) { // if any blocks in list is bad, reject list
if (b == null) {
throw new NullPointerException("Null blocks in block list");
}
blk.set(bpid, b);
if (isValidBlock(blk)) {
throw new IOException("Block already exists in block list");
}
}
Map<Block, BInfo> map = blockMap.get(bpid);
if (map == null) {
map = new HashMap<Block, BInfo>();
blockMap.put(bpid, map);
}
for (Block b: injectBlocks) {
BInfo binfo = new BInfo(bpid, b, false);
map.put(binfo.theBlock, binfo);
}
}
}
/** Get a map for a given block pool Id */
private Map<Block, BInfo> getMap(String bpid) throws IOException {
final Map<Block, BInfo> map = blockMap.get(bpid);
if (map == null) {
throw new IOException("Non existent blockpool " + bpid);
}
return map;
}
@Override // FsDatasetSpi
public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
throw new IOException("Finalizing a non existing block " + b);
}
binfo.finalizeBlock(b.getBlockPoolId(), b.getNumBytes());
}
@Override // FsDatasetSpi
public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException{
if (isValidRbw(b)) {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
map.remove(b.getLocalBlock());
}
}
synchronized BlockListAsLongs getBlockReport(String bpid) {
BlockListAsLongs.Builder report = BlockListAsLongs.builder();
final Map<Block, BInfo> map = blockMap.get(bpid);
if (map != null) {
for (BInfo b : map.values()) {
if (b.isFinalized()) {
report.add(b);
}
}
}
return report.build();
}
@Override
public synchronized Map<DatanodeStorage, BlockListAsLongs> getBlockReports(
String bpid) {
return Collections.singletonMap(storage.getDnStorage(), getBlockReport(bpid));
}
@Override // FsDatasetSpi
public List<Long> getCacheReport(String bpid) {
return new LinkedList<Long>();
}
@Override // FSDatasetMBean
public long getCapacity() {
return storage.getCapacity();
}
@Override // FSDatasetMBean
public long getDfsUsed() {
return storage.getUsed();
}
@Override // FSDatasetMBean
public long getBlockPoolUsed(String bpid) throws IOException {
return storage.getBlockPoolUsed(bpid);
}
@Override // FSDatasetMBean
public long getRemaining() {
return storage.getFree();
}
@Override // FSDatasetMBean
public int getNumFailedVolumes() {
return storage.getNumFailedVolumes();
}
@Override // FSDatasetMBean
public String[] getFailedStorageLocations() {
return null;
}
@Override // FSDatasetMBean
public long getLastVolumeFailureDate() {
return 0;
}
@Override // FSDatasetMBean
public long getEstimatedCapacityLostTotal() {
return 0;
}
@Override // FsDatasetSpi
public VolumeFailureSummary getVolumeFailureSummary() {
return new VolumeFailureSummary(ArrayUtils.EMPTY_STRING_ARRAY, 0, 0);
}
@Override // FSDatasetMBean
public long getCacheUsed() {
return 0l;
}
@Override // FSDatasetMBean
public long getCacheCapacity() {
return 0l;
}
@Override // FSDatasetMBean
public long getNumBlocksCached() {
return 0l;
}
@Override
public long getNumBlocksFailedToCache() {
return 0l;
}
@Override
public long getNumBlocksFailedToUncache() {
return 0l;
}
/**
* Get metrics from the metrics source
*
* @param collector to contain the resulting metrics snapshot
* @param all if true, return all metrics even if unchanged.
*/
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
try {
DataNodeMetricHelper.getMetrics(collector, this, "SimulatedFSDataset");
} catch (Exception e){
//ignore Exceptions
}
}
@Override // FsDatasetSpi
public synchronized long getLength(ExtendedBlock b) throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
throw new IOException("Finalizing a non existing block " + b);
}
return binfo.getNumBytes();
}
@Override
@Deprecated
public Replica getReplica(String bpid, long blockId) {
final Map<Block, BInfo> map = blockMap.get(bpid);
if (map != null) {
return map.get(new Block(blockId));
}
return null;
}
@Override
public synchronized String getReplicaString(String bpid, long blockId) {
Replica r = null;
final Map<Block, BInfo> map = blockMap.get(bpid);
if (map != null) {
r = map.get(new Block(blockId));
}
return r == null? "null": r.toString();
}
@Override // FsDatasetSpi
public Block getStoredBlock(String bpid, long blkid) throws IOException {
final Map<Block, BInfo> map = blockMap.get(bpid);
if (map != null) {
BInfo binfo = map.get(new Block(blkid));
if (binfo == null) {
return null;
}
return new Block(blkid, binfo.getGenerationStamp(), binfo.getNumBytes());
}
return null;
}
@Override // FsDatasetSpi
public synchronized void invalidate(String bpid, Block[] invalidBlks)
throws IOException {
boolean error = false;
if (invalidBlks == null) {
return;
}
final Map<Block, BInfo> map = getMap(bpid);
for (Block b: invalidBlks) {
if (b == null) {
continue;
}
BInfo binfo = map.get(b);
if (binfo == null) {
error = true;
DataNode.LOG.warn("Invalidate: Missing block");
continue;
}
storage.free(bpid, binfo.getNumBytes());
map.remove(b);
if (datanode != null) {
datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, b),
binfo.getStorageUuid());
}
}
if (error) {
throw new IOException("Invalidate: Missing blocks.");
}
}
@Override // FSDatasetSpi
public void cache(String bpid, long[] cacheBlks) {
throw new UnsupportedOperationException(
"SimulatedFSDataset does not support cache operation!");
}
@Override // FSDatasetSpi
public void uncache(String bpid, long[] uncacheBlks) {
throw new UnsupportedOperationException(
"SimulatedFSDataset does not support uncache operation!");
}
@Override // FSDatasetSpi
public boolean isCached(String bpid, long blockId) {
return false;
}
private BInfo getBInfo(final ExtendedBlock b) {
final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId());
return map == null? null: map.get(b.getLocalBlock());
}
@Override // {@link FsDatasetSpi}
public boolean contains(ExtendedBlock block) {
return getBInfo(block) != null;
}
/**
* Check if a block is valid.
*
* @param b The block to check.
* @param minLength The minimum length that the block must have. May be 0.
* @param state If this is null, it is ignored. If it is non-null, we
* will check that the replica has this state.
*
* @throws ReplicaNotFoundException If the replica is not found
*
* @throws UnexpectedReplicaStateException If the replica is not in the
* expected state.
*/
@Override // {@link FsDatasetSpi}
public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
throws ReplicaNotFoundException, UnexpectedReplicaStateException {
final BInfo binfo = getBInfo(b);
if (binfo == null) {
throw new ReplicaNotFoundException(b);
}
if ((state == ReplicaState.FINALIZED && !binfo.isFinalized()) ||
(state != ReplicaState.FINALIZED && binfo.isFinalized())) {
throw new UnexpectedReplicaStateException(b,state);
}
}
@Override // FsDatasetSpi
public synchronized boolean isValidBlock(ExtendedBlock b) {
try {
checkBlock(b, 0, ReplicaState.FINALIZED);
} catch (IOException e) {
return false;
}
return true;
}
/* check if a block is created but not finalized */
@Override
public synchronized boolean isValidRbw(ExtendedBlock b) {
try {
checkBlock(b, 0, ReplicaState.RBW);
} catch (IOException e) {
return false;
}
return true;
}
@Override
public String toString() {
return getStorageInfo();
}
@Override // FsDatasetSpi
public synchronized ReplicaHandler append(
ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null || !binfo.isFinalized()) {
throw new ReplicaNotFoundException("Block " + b
+ " is not valid, and cannot be appended to.");
}
binfo.unfinalizeBlock();
return new ReplicaHandler(binfo, null);
}
@Override // FsDatasetSpi
public synchronized ReplicaHandler recoverAppend(
ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
throw new ReplicaNotFoundException("Block " + b
+ " is not valid, and cannot be appended to.");
}
if (binfo.isFinalized()) {
binfo.unfinalizeBlock();
}
map.remove(b);
binfo.theBlock.setGenerationStamp(newGS);
map.put(binfo.theBlock, binfo);
return new ReplicaHandler(binfo, null);
}
@Override // FsDatasetSpi
public String recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen)
throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
throw new ReplicaNotFoundException("Block " + b
+ " is not valid, and cannot be appended to.");
}
if (!binfo.isFinalized()) {
binfo.finalizeBlock(b.getBlockPoolId(), binfo.getNumBytes());
}
map.remove(b.getLocalBlock());
binfo.theBlock.setGenerationStamp(newGS);
map.put(binfo.theBlock, binfo);
return binfo.getStorageUuid();
}
@Override // FsDatasetSpi
public synchronized ReplicaHandler recoverRbw(
ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd)
throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
BInfo binfo = map.get(b.getLocalBlock());
if ( binfo == null) {
throw new ReplicaNotFoundException("Block " + b
+ " does not exist, and cannot be appended to.");
}
if (binfo.isFinalized()) {
throw new ReplicaAlreadyExistsException("Block " + b
+ " is valid, and cannot be written to.");
}
map.remove(b);
binfo.theBlock.setGenerationStamp(newGS);
map.put(binfo.theBlock, binfo);
return new ReplicaHandler(binfo, null);
}
@Override // FsDatasetSpi
public synchronized ReplicaHandler createRbw(
StorageType storageType, ExtendedBlock b,
boolean allowLazyPersist) throws IOException {
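    // In the simulated dataset an RBW replica is indistinguishable from a
    // temporary one, so creation is delegated to createTemporary().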
return createTemporary(storageType, b);
}
@Override // FsDatasetSpi
public synchronized ReplicaHandler createTemporary(
StorageType storageType, ExtendedBlock b) throws IOException {
if (isValidBlock(b)) {
throw new ReplicaAlreadyExistsException("Block " + b +
" is valid, and cannot be written to.");
}
if (isValidRbw(b)) {
throw new ReplicaAlreadyExistsException("Block " + b +
" is being written, and cannot be written to.");
}
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
BInfo binfo = new BInfo(b.getBlockPoolId(), b.getLocalBlock(), true);
map.put(binfo.theBlock, binfo);
return new ReplicaHandler(binfo, null);
}
synchronized InputStream getBlockInputStream(ExtendedBlock b
) throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
throw new IOException("No such Block " + b );
}
return binfo.getIStream();
}
@Override // FsDatasetSpi
public synchronized InputStream getBlockInputStream(ExtendedBlock b,
long seekOffset) throws IOException {
InputStream result = getBlockInputStream(b);
IOUtils.skipFully(result, seekOffset);
return result;
}
/** Not supported */
@Override // FsDatasetSpi
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
long ckoff) throws IOException {
throw new IOException("Not supported");
}
@Override // FsDatasetSpi
public synchronized LengthInputStream getMetaDataInputStream(ExtendedBlock b
) throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
throw new IOException("No such Block " + b );
}
if (!binfo.finalized) {
throw new IOException("Block " + b +
" is being written, its meta cannot be read");
}
final SimulatedInputStream sin = binfo.getMetaIStream();
return new LengthInputStream(sin, sin.getLength());
}
@Override
public Set<File> checkDataDir() {
// nothing to check for simulated data set
return null;
}
@Override // FsDatasetSpi
public synchronized void adjustCrcChannelPosition(ExtendedBlock b,
ReplicaOutputStreams stream,
int checksumSize)
throws IOException {
}
/**
* Simulated input and output streams
*
*/
static private class SimulatedInputStream extends java.io.InputStream {
final long length; // bytes
int currentPos = 0;
byte[] data = null;
Block theBlock = null;
/**
     * An input stream of size l whose bytes are generated from the block id
     * @param l size of the stream
     * @param b block whose id seeds the simulated data
*/
SimulatedInputStream(long l, Block b) {
length = l;
theBlock = b;
}
/**
     * An input stream of the supplied data
* @param iData data to construct the stream
*/
SimulatedInputStream(byte[] iData) {
data = iData;
length = data.length;
}
/**
     * @return the length of the input stream
*/
long getLength() {
return length;
}
@Override
public int read() throws IOException {
if (currentPos >= length) {
return -1;
}
if (data !=null) {
        return data[currentPos++] & BYTE_MASK; // keep read() in 0..255 range
} else {
return simulatedByte(theBlock, currentPos++) & BYTE_MASK;
}
}
@Override
public int read(byte[] b) throws IOException {
if (b == null) {
throw new NullPointerException();
}
if (b.length == 0) {
return 0;
}
if (currentPos >= length) { // EOF
return -1;
}
int bytesRead = (int) Math.min(b.length, length-currentPos);
if (data != null) {
System.arraycopy(data, currentPos, b, 0, bytesRead);
      } else { // generate simulated bytes derived from the block id
for (int i = 0; i < bytesRead; i++) {
b[i] = simulatedByte(theBlock, currentPos + i);
}
}
currentPos += bytesRead;
return bytesRead;
}
}
/**
* This class implements an output stream that merely throws its data away, but records its
* length.
*
*/
static private class SimulatedOutputStream extends OutputStream {
long length = 0;
/**
     * constructor for SimulatedOutputStream
*/
SimulatedOutputStream() {
}
/**
*
* @return the length of the data created so far.
*/
long getLength() {
return length;
}
    /**
     * Set the simulated length of the data written so far.
     */
void setLength(long length) {
this.length = length;
}
@Override
public void write(int arg0) throws IOException {
length++;
}
@Override
public void write(byte[] b) throws IOException {
length += b.length;
}
@Override
public void write(byte[] b,
int off,
int len) throws IOException {
length += len;
}
}
private ObjectName mbeanName;
/**
* Register the FSDataset MBean using the name
* "hadoop:service=DataNode,name=FSDatasetState-<storageid>"
* We use storage id for MBean name since a minicluster within a single
* Java VM may have multiple Simulated Datanodes.
*/
void registerMBean(final String storageId) {
    // We wrap to bypass the standard MBean naming convention.
    // This wrapping can be removed in Java 6, which is more flexible about
    // package naming for MBeans and their implementations.
StandardMBean bean;
try {
bean = new StandardMBean(this,FSDatasetMBean.class);
mbeanName = MBeans.register("DataNode", "FSDatasetState-"+
storageId, bean);
} catch (NotCompliantMBeanException e) {
DataNode.LOG.warn("Error registering FSDatasetState MBean", e);
}
DataNode.LOG.info("Registered FSDatasetState MBean");
}
@Override
public void shutdown() {
if (mbeanName != null) MBeans.unregister(mbeanName);
}
@Override
public String getStorageInfo() {
return "Simulated FSDataset-" + datanodeUuid;
}
@Override
public boolean hasEnoughResource() {
return true;
}
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
throws IOException {
ExtendedBlock b = rBlock.getBlock();
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
throw new IOException("No such Block " + b );
}
return new ReplicaRecoveryInfo(binfo.getBlockId(), binfo.getBytesOnDisk(),
binfo.getGenerationStamp(),
binfo.isFinalized()?ReplicaState.FINALIZED : ReplicaState.RBW);
}
@Override // FsDatasetSpi
public String updateReplicaUnderRecovery(ExtendedBlock oldBlock,
long recoveryId,
long newBlockId,
long newlength) {
// Caller does not care about the exact Storage UUID returned.
return datanodeUuid;
}
@Override // FsDatasetSpi
public long getReplicaVisibleLength(ExtendedBlock block) {
return block.getNumBytes();
}
@Override // FsDatasetSpi
public void addBlockPool(String bpid, Configuration conf) {
Map<Block, BInfo> map = new HashMap<Block, BInfo>();
blockMap.put(bpid, map);
storage.addBlockPool(bpid);
}
@Override // FsDatasetSpi
public void shutdownBlockPool(String bpid) {
blockMap.remove(bpid);
storage.removeBlockPool(bpid);
}
@Override // FsDatasetSpi
public void deleteBlockPool(String bpid, boolean force) {
return;
}
@Override
public ReplicaInPipelineInterface convertTemporaryToRbw(ExtendedBlock temporary)
throws IOException {
final Map<Block, BInfo> map = blockMap.get(temporary.getBlockPoolId());
if (map == null) {
throw new IOException("Block pool not found, temporary=" + temporary);
}
final BInfo r = map.get(temporary.getLocalBlock());
if (r == null) {
throw new IOException("Block not found, temporary=" + temporary);
} else if (r.isFinalized()) {
throw new IOException("Replica already finalized, temporary="
+ temporary + ", r=" + r);
}
return r;
}
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b) {
throw new UnsupportedOperationException();
}
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(String bpid, long[] blockIds)
throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void enableTrash(String bpid) {
throw new UnsupportedOperationException();
}
@Override
public void clearTrash(String bpid) {
}
@Override
public boolean trashEnabled(String bpid) {
return false;
}
@Override
public void setRollingUpgradeMarker(String bpid) {
}
@Override
public void clearRollingUpgradeMarker(String bpid) {
}
@Override
public void checkAndUpdate(String bpid, long blockId, File diskFile,
File diskMetaFile, FsVolumeSpi vol) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public FsVolumeReferences getFsVolumeReferences() {
throw new UnsupportedOperationException();
}
@Override
public void addVolume(
final StorageLocation location,
final List<NamespaceInfo> nsInfos) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public DatanodeStorage getStorage(final String storageUuid) {
return storageUuid.equals(storage.getStorageUuid()) ?
storage.dnStorage :
null;
}
@Override
public StorageReport[] getStorageReports(String bpid) {
return new StorageReport[] {storage.getStorageReport(bpid)};
}
@Override
public List<FinalizedReplica> getFinalizedBlocks(String bpid) {
throw new UnsupportedOperationException();
}
@Override
public List<FinalizedReplica> getFinalizedBlocksOnPersistentStorage(String bpid) {
throw new UnsupportedOperationException();
}
@Override
public Map<String, Object> getVolumeInfoMap() {
throw new UnsupportedOperationException();
}
@Override
public FsVolumeSpi getVolume(ExtendedBlock b) {
return volume;
}
@Override
public synchronized void removeVolumes(Set<File> volumes, boolean clearFailure) {
throw new UnsupportedOperationException();
}
@Override
public void submitBackgroundSyncFileRangeRequest(ExtendedBlock block,
FileDescriptor fd, long offset, long nbytes, int flags) {
throw new UnsupportedOperationException();
}
@Override
public void onCompleteLazyPersist(String bpId, long blockId,
long creationTime, File[] savedFiles, FsVolumeSpi targetVolume) {
throw new UnsupportedOperationException();
}
@Override
public void onFailLazyPersist(String bpId, long blockId) {
throw new UnsupportedOperationException();
}
@Override
public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
StorageType targetStorageType) throws IOException {
// TODO Auto-generated method stub
return null;
}
@Override
public void setPinning(ExtendedBlock b) throws IOException {
blockMap.get(b.getBlockPoolId()).get(b.getLocalBlock()).pinned = true;
}
@Override
public boolean getPinning(ExtendedBlock b) throws IOException {
return blockMap.get(b.getBlockPoolId()).get(b.getLocalBlock()).pinned;
}
@Override
public boolean isDeletingBlock(String bpid, long blockId) {
throw new UnsupportedOperationException();
}
}
| 39,855 | 28.091971 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.common.base.Supplier;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
public class TestBPOfferService {
private static final String FAKE_BPID = "fake bpid";
private static final String FAKE_CLUSTERID = "fake cluster";
protected static final Log LOG = LogFactory.getLog(
TestBPOfferService.class);
private static final ExtendedBlock FAKE_BLOCK =
new ExtendedBlock(FAKE_BPID, 12345L);
private static final File TEST_BUILD_DATA = PathUtils.getTestDir(TestBPOfferService.class);
private long firstCallTime = 0;
private long secondCallTime = 0;
static {
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
}
private DatanodeProtocolClientSideTranslatorPB mockNN1;
private DatanodeProtocolClientSideTranslatorPB mockNN2;
private final NNHAStatusHeartbeat[] mockHaStatuses = new NNHAStatusHeartbeat[2];
private final int[] heartbeatCounts = new int[2];
private DataNode mockDn;
private FsDatasetSpi<?> mockFSDataset;
@Before
public void setupMocks() throws Exception {
mockNN1 = setupNNMock(0);
mockNN2 = setupNNMock(1);
// Set up a mock DN with the bare-bones configuration
// objects, etc.
mockDn = Mockito.mock(DataNode.class);
Mockito.doReturn(true).when(mockDn).shouldRun();
Configuration conf = new Configuration();
File dnDataDir = new File(new File(TEST_BUILD_DATA, "dfs"), "data");
conf.set(DFS_DATANODE_DATA_DIR_KEY, dnDataDir.toURI().toString());
Mockito.doReturn(conf).when(mockDn).getConf();
Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn"))
.when(mockDn).getMetrics();
// Set up a simulated dataset with our fake BP
mockFSDataset = Mockito.spy(new SimulatedFSDataset(null, conf));
mockFSDataset.addBlockPool(FAKE_BPID, conf);
// Wire the dataset to the DN.
Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
}
/**
   * Set up a mock NN with the bare minimum for a DN to register with it.
*/
private DatanodeProtocolClientSideTranslatorPB setupNNMock(int nnIdx)
throws Exception {
DatanodeProtocolClientSideTranslatorPB mock =
Mockito.mock(DatanodeProtocolClientSideTranslatorPB.class);
Mockito.doReturn(new NamespaceInfo(1, FAKE_CLUSTERID, FAKE_BPID, 0))
.when(mock).versionRequest();
Mockito.doReturn(DFSTestUtil.getLocalDatanodeRegistration())
.when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));
Mockito.doAnswer(new HeartbeatAnswer(nnIdx))
.when(mock).sendHeartbeat(
Mockito.any(DatanodeRegistration.class),
Mockito.any(StorageReport[].class),
Mockito.anyLong(),
Mockito.anyLong(),
Mockito.anyInt(),
Mockito.anyInt(),
Mockito.anyInt(),
Mockito.any(VolumeFailureSummary.class),
Mockito.anyBoolean());
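    // Start each mock NN out as standby; individual tests promote one to
    // active by updating this array before triggering a heartbeat.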
mockHaStatuses[nnIdx] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 0);
return mock;
}
/**
* Mock answer for heartbeats which returns an empty set of commands
* and the HA status for the chosen NN from the
* {@link TestBPOfferService#mockHaStatuses} array.
*/
private class HeartbeatAnswer implements Answer<HeartbeatResponse> {
private final int nnIdx;
public HeartbeatAnswer(int nnIdx) {
this.nnIdx = nnIdx;
}
@Override
public HeartbeatResponse answer(InvocationOnMock invocation) throws Throwable {
heartbeatCounts[nnIdx]++;
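      // The final constructor argument is the full block report lease ID;
      // OR-ing the random value with 1 guarantees it is non-zero.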
return new HeartbeatResponse(new DatanodeCommand[0],
mockHaStatuses[nnIdx], null,
ThreadLocalRandom.current().nextLong() | 1L);
}
}
/**
   * Test that the BPOS can register with two different NNs,
   * send block reports to both, etc.
*/
@Test
public void testBasicFunctionality() throws Exception {
BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
bpos.start();
try {
waitForInitialization(bpos);
      // The DN should have registered with both NNs.
Mockito.verify(mockNN1).registerDatanode(
Mockito.any(DatanodeRegistration.class));
Mockito.verify(mockNN2).registerDatanode(
Mockito.any(DatanodeRegistration.class));
// Should get block reports from both NNs
waitForBlockReport(mockNN1);
waitForBlockReport(mockNN2);
// When we receive a block, it should report it to both NNs
bpos.notifyNamenodeReceivedBlock(FAKE_BLOCK, "", "");
ReceivedDeletedBlockInfo[] ret = waitForBlockReceived(FAKE_BLOCK, mockNN1);
assertEquals(1, ret.length);
assertEquals(FAKE_BLOCK.getLocalBlock(), ret[0].getBlock());
ret = waitForBlockReceived(FAKE_BLOCK, mockNN2);
assertEquals(1, ret.length);
assertEquals(FAKE_BLOCK.getLocalBlock(), ret[0].getBlock());
} finally {
bpos.stop();
}
}
/**
* Test that DNA_INVALIDATE commands from the standby are ignored.
*/
@Test
public void testIgnoreDeletionsFromNonActive() throws Exception {
BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
// Ask to invalidate FAKE_BLOCK when block report hits the
// standby
Mockito.doReturn(new BlockCommand(DatanodeProtocol.DNA_INVALIDATE,
FAKE_BPID, new Block[] { FAKE_BLOCK.getLocalBlock() }))
.when(mockNN2).blockReport(
Mockito.<DatanodeRegistration>anyObject(),
Mockito.eq(FAKE_BPID),
Mockito.<StorageBlockReport[]>anyObject(),
Mockito.<BlockReportContext>anyObject());
bpos.start();
try {
waitForInitialization(bpos);
// Should get block reports from both NNs
waitForBlockReport(mockNN1);
waitForBlockReport(mockNN2);
} finally {
bpos.stop();
}
// Should ignore the delete command from the standby
Mockito.verify(mockFSDataset, Mockito.never())
.invalidate(Mockito.eq(FAKE_BPID),
(Block[]) Mockito.anyObject());
}
  /**
   * Ensure that, if the two NNs configured for a block pool report
   * different cluster IDs, the DN will refuse to register with one
   * of them.
   */
@Test
public void testNNsFromDifferentClusters() throws Exception {
Mockito
.doReturn(new NamespaceInfo(1, "fake foreign cluster", FAKE_BPID, 0))
.when(mockNN1).versionRequest();
BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
bpos.start();
try {
waitForOneToFail(bpos);
} finally {
bpos.stop();
}
}
/**
* Test that the DataNode determines the active NameNode correctly
* based on the HA-related information in heartbeat responses.
* See HDFS-2627.
*/
@Test
public void testPickActiveNameNode() throws Exception {
BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
bpos.start();
try {
waitForInitialization(bpos);
// Should start with neither NN as active.
assertNull(bpos.getActiveNN());
// Have NN1 claim active at txid 1
mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1);
bpos.triggerHeartbeatForTests();
assertSame(mockNN1, bpos.getActiveNN());
// NN2 claims active at a higher txid
mockHaStatuses[1] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 2);
bpos.triggerHeartbeatForTests();
assertSame(mockNN2, bpos.getActiveNN());
// Even after another heartbeat from the first NN, it should
// think NN2 is active, since it claimed a higher txid
bpos.triggerHeartbeatForTests();
assertSame(mockNN2, bpos.getActiveNN());
// Even if NN2 goes to standby, DN shouldn't reset to talking to NN1,
// because NN1's txid is lower than the last active txid. Instead,
// it should consider neither active.
mockHaStatuses[1] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 2);
bpos.triggerHeartbeatForTests();
assertNull(bpos.getActiveNN());
// Now if NN1 goes back to a higher txid, it should be considered active
mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 3);
bpos.triggerHeartbeatForTests();
assertSame(mockNN1, bpos.getActiveNN());
} finally {
bpos.stop();
}
}
/**
* Test datanode block pool initialization error handling.
   * Failure to initialize a block pool should not cause an NPE.
*/
@Test
public void testBPInitErrorHandling() throws Exception {
final DataNode mockDn = Mockito.mock(DataNode.class);
Mockito.doReturn(true).when(mockDn).shouldRun();
Configuration conf = new Configuration();
File dnDataDir = new File(
new File(TEST_BUILD_DATA, "testBPInitErrorHandling"), "data");
conf.set(DFS_DATANODE_DATA_DIR_KEY, dnDataDir.toURI().toString());
Mockito.doReturn(conf).when(mockDn).getConf();
Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn")).
when(mockDn).getMetrics();
final AtomicInteger count = new AtomicInteger();
Mockito.doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
if (count.getAndIncrement() == 0) {
throw new IOException("faked initBlockPool exception");
}
        // initBlockPool is called again; this time the mock initialization succeeds.
Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
return null;
}
}).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class));
BPOfferService bpos = setupBPOSForNNs(mockDn, mockNN1, mockNN2);
List<BPServiceActor> actors = bpos.getBPServiceActors();
assertEquals(2, actors.size());
bpos.start();
try {
waitForInitialization(bpos);
      // Even if one of the actors fails to initialize, the other one
      // should still finish its block report.
waitForBlockReport(mockNN1, mockNN2);
} finally {
bpos.stop();
}
}
private void waitForOneToFail(final BPOfferService bpos)
throws Exception {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
List<BPServiceActor> actors = bpos.getBPServiceActors();
int failedcount = 0;
for (BPServiceActor actor : actors) {
if (!actor.isAlive()) {
failedcount++;
}
}
return failedcount == 1;
}
}, 100, 10000);
}
/**
* Create a BPOfferService which registers with and heartbeats with the
* specified namenode proxy objects.
* @throws IOException
*/
private BPOfferService setupBPOSForNNs(
DatanodeProtocolClientSideTranslatorPB ... nns) throws IOException {
return setupBPOSForNNs(mockDn, nns);
}
private BPOfferService setupBPOSForNNs(DataNode mockDn,
DatanodeProtocolClientSideTranslatorPB ... nns) throws IOException {
// Set up some fake InetAddresses, then override the connectToNN
// function to return the corresponding proxies.
final Map<InetSocketAddress, DatanodeProtocolClientSideTranslatorPB> nnMap = Maps.newLinkedHashMap();
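    // Use the NN index as the fake port number so that each address is
    // unique and maps back to its own mock proxy.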
for (int port = 0; port < nns.length; port++) {
nnMap.put(new InetSocketAddress(port), nns[port]);
Mockito.doReturn(nns[port]).when(mockDn).connectToNN(
Mockito.eq(new InetSocketAddress(port)));
}
return new BPOfferService(Lists.newArrayList(nnMap.keySet()), mockDn);
}
private void waitForInitialization(final BPOfferService bpos)
throws Exception {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return bpos.isAlive() && bpos.isInitialized();
}
}, 100, 10000);
}
private void waitForBlockReport(final DatanodeProtocolClientSideTranslatorPB mockNN)
throws Exception {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
Mockito.verify(mockNN).blockReport(
Mockito.<DatanodeRegistration>anyObject(),
Mockito.eq(FAKE_BPID),
Mockito.<StorageBlockReport[]>anyObject(),
Mockito.<BlockReportContext>anyObject());
return true;
} catch (Throwable t) {
LOG.info("waiting on block report: " + t.getMessage());
return false;
}
}
}, 500, 10000);
}
private void waitForBlockReport(
final DatanodeProtocolClientSideTranslatorPB mockNN1,
final DatanodeProtocolClientSideTranslatorPB mockNN2)
throws Exception {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return get(mockNN1) || get(mockNN2);
}
private Boolean get(DatanodeProtocolClientSideTranslatorPB mockNN) {
try {
Mockito.verify(mockNN).blockReport(
Mockito.<DatanodeRegistration>anyObject(),
Mockito.eq(FAKE_BPID),
Mockito.<StorageBlockReport[]>anyObject(),
Mockito.<BlockReportContext>anyObject());
return true;
} catch (Throwable t) {
LOG.info("waiting on block report: " + t.getMessage());
return false;
}
}
}, 500, 10000);
}
private ReceivedDeletedBlockInfo[] waitForBlockReceived(
final ExtendedBlock fakeBlock,
final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
final String fakeBlockPoolId = fakeBlock.getBlockPoolId();
final ArgumentCaptor<StorageReceivedDeletedBlocks[]> captor =
ArgumentCaptor.forClass(StorageReceivedDeletedBlocks[].class);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
Mockito.verify(mockNN).blockReceivedAndDeleted(
Mockito.<DatanodeRegistration>anyObject(),
Mockito.eq(fakeBlockPoolId),
captor.capture());
return true;
} catch (Throwable t) {
return false;
}
}
}, 100, 10000);
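    // The captor now holds the argument from the verified call; return the
    // block infos reported for the first storage.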
return captor.getValue()[0].getBlocks();
}
private void setTimeForSynchronousBPOSCalls() {
if (firstCallTime == 0) {
firstCallTime = Time.now();
} else {
secondCallTime = Time.now();
}
}
private class BPOfferServiceSynchronousCallAnswer implements Answer<Void> {
private final int nnIdx;
public BPOfferServiceSynchronousCallAnswer(int nnIdx) {
this.nnIdx = nnIdx;
}
    // For the active namenode we record the call time; for the standby
    // namenode we sleep for 5 seconds to simulate a standby namenode
    // that is down.
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
if (nnIdx == 0) {
setTimeForSynchronousBPOSCalls();
} else {
Thread.sleep(5000);
}
return null;
}
}
  /**
   * This test case tests the {@link BPOfferService#reportBadBlocks} method:
   * if the call to the standby namenode times out, it should not affect the
   * processing of the corresponding call to the active namenode, since this
   * method runs under the write lock.
   * @throws Exception
   */
@Test
public void testReportBadBlockWhenStandbyNNTimesOut() throws Exception {
BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
bpos.start();
try {
waitForInitialization(bpos);
// Should start with neither NN as active.
assertNull(bpos.getActiveNN());
// Have NN1 claim active at txid 1
mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1);
bpos.triggerHeartbeatForTests();
      // Now mockNN1 acts as the active namenode and mockNN2 as the standby.
assertSame(mockNN1, bpos.getActiveNN());
Mockito.doAnswer(new BPOfferServiceSynchronousCallAnswer(0))
.when(mockNN1).reportBadBlocks(Mockito.any(LocatedBlock[].class));
Mockito.doAnswer(new BPOfferServiceSynchronousCallAnswer(1))
.when(mockNN2).reportBadBlocks(Mockito.any(LocatedBlock[].class));
bpos.reportBadBlocks(FAKE_BLOCK, mockFSDataset.getVolume(FAKE_BLOCK)
.getStorageID(), mockFSDataset.getVolume(FAKE_BLOCK)
.getStorageType());
bpos.reportBadBlocks(FAKE_BLOCK, mockFSDataset.getVolume(FAKE_BLOCK)
.getStorageID(), mockFSDataset.getVolume(FAKE_BLOCK)
.getStorageType());
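      // Give both queued reports time to be processed. If the active-NN
      // calls had been serialized behind the standby's 5-second sleep, the
      // gap between the two recorded call times would exceed 5 seconds.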
Thread.sleep(10000);
long difference = secondCallTime - firstCallTime;
assertTrue("Active namenode reportBadBlock processing should be "
+ "independent of standby namenode reportBadBlock processing ",
difference < 5000);
} finally {
bpos.stop();
}
}
  /**
   * This test case tests the {@link BPOfferService#trySendErrorReport} method:
   * if the call to the standby namenode times out, it should not affect the
   * processing of the corresponding call to the active namenode, since this
   * method runs under the write lock.
   * @throws Exception
   */
@Test
public void testTrySendErrorReportWhenStandbyNNTimesOut() throws Exception {
BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
bpos.start();
try {
waitForInitialization(bpos);
// Should start with neither NN as active.
assertNull(bpos.getActiveNN());
// Have NN1 claim active at txid 1
mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1);
bpos.triggerHeartbeatForTests();
      // Now mockNN1 acts as the active namenode and mockNN2 as the standby.
assertSame(mockNN1, bpos.getActiveNN());
Mockito.doAnswer(new BPOfferServiceSynchronousCallAnswer(0))
.when(mockNN1).errorReport(Mockito.any(DatanodeRegistration.class),
Mockito.anyInt(), Mockito.anyString());
Mockito.doAnswer(new BPOfferServiceSynchronousCallAnswer(1))
.when(mockNN2).errorReport(Mockito.any(DatanodeRegistration.class),
Mockito.anyInt(), Mockito.anyString());
String errorString = "Can't send invalid block " + FAKE_BLOCK;
bpos.trySendErrorReport(DatanodeProtocol.INVALID_BLOCK, errorString);
bpos.trySendErrorReport(DatanodeProtocol.INVALID_BLOCK, errorString);
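      // As above: the standby answer sleeps for 5 seconds, so a gap of less
      // than 5 seconds between the two recorded call times shows the
      // active-NN call was not serialized behind the standby call.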
Thread.sleep(10000);
long difference = secondCallTime - firstCallTime;
assertTrue("Active namenode trySendErrorReport processing "
+ "should be independent of standby namenode trySendErrorReport"
+ " processing ", difference < 5000);
} finally {
bpos.stop();
}
}
  /**
   * This test case tests whether {@link BPServiceActor#processQueueMessages}
   * adds the error report back to the queue when
   * {@link BPServiceActorAction#reportTo} throws an IOException.
   * @throws Exception
   */
@Test
public void testTrySendErrorReportWhenNNThrowsIOException()
throws Exception {
BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
bpos.start();
try {
waitForInitialization(bpos);
// Should start with neither NN as active.
assertNull(bpos.getActiveNN());
// Have NN1 claim active at txid 1
mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1);
bpos.triggerHeartbeatForTests();
      // Now mockNN1 acts as the active namenode and mockNN2 as the standby.
assertSame(mockNN1, bpos.getActiveNN());
Mockito.doAnswer(new Answer<Void>() {
        // Throw an IOException on the first call; this should add the
        // errorReport back to the bpThreadQueue so it is processed on the
        // next pass.
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
if (firstCallTime == 0) {
firstCallTime = Time.now();
throw new IOException();
} else {
secondCallTime = Time.now();
return null;
}
}
}).when(mockNN1).errorReport(Mockito.any(DatanodeRegistration.class),
Mockito.anyInt(), Mockito.anyString());
String errorString = "Can't send invalid block " + FAKE_BLOCK;
bpos.trySendErrorReport(DatanodeProtocol.INVALID_BLOCK, errorString);
Thread.sleep(10000);
assertTrue("Active namenode didn't add the report back to the queue "
+ "when errorReport threw IOException", secondCallTime != 0);
} finally {
bpos.stop();
}
}
  /**
   * This test case verifies that the reportBadBlock request is not added
   * back via {@link BPServiceActor#bpThreadEnqueue} when the standby
   * namenode throws a {@link StandbyException}.
   * @throws Exception
   */
@Test
public void testReportBadBlocksWhenNNThrowsStandbyException()
throws Exception {
BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
bpos.start();
try {
waitForInitialization(bpos);
// Should start with neither NN as active.
assertNull(bpos.getActiveNN());
// Have NN1 claim active at txid 1
mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1);
bpos.triggerHeartbeatForTests();
      // Now mockNN1 acts as the active namenode and mockNN2 as the standby.
assertSame(mockNN1, bpos.getActiveNN());
      // Return nothing when the Active Namenode calls reportBadBlocks.
Mockito.doNothing().when(mockNN1).reportBadBlocks
(Mockito.any(LocatedBlock[].class));
RemoteException re = new RemoteException(StandbyException.class.
getName(), "Operation category WRITE is not supported in state "
+ "standby", RpcErrorCodeProto.ERROR_APPLICATION);
// Return StandbyException wrapped in RemoteException when Standby NN
// calls reportBadBlocks
Mockito.doThrow(re).when(mockNN2).reportBadBlocks
(Mockito.any(LocatedBlock[].class));
bpos.reportBadBlocks(FAKE_BLOCK, mockFSDataset.getVolume(FAKE_BLOCK)
.getStorageID(), mockFSDataset.getVolume(FAKE_BLOCK)
.getStorageType());
      // Trigger a heartbeat so that the BPServiceActor can report the bad
      // block to the namenode.
bpos.triggerHeartbeatForTests();
Mockito.verify(mockNN2, Mockito.times(1))
.reportBadBlocks(Mockito.any(LocatedBlock[].class));
      // Trigger another heartbeat; this would resend reportBadBlocks if the
      // request were still in the queue.
bpos.triggerHeartbeatForTests();
Mockito.verify(mockNN2, Mockito.times(1))
.reportBadBlocks(Mockito.any(LocatedBlock[].class));
} finally {
bpos.stop();
}
}
}
| 25,865 | 36.926686 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.*;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
/**
* This test verifies NameNode behavior when it gets unexpected block reports
* from DataNodes. The same block is reported by two different storages on
* the same DataNode. Excess replicas on the same DN should be ignored by the NN.
*/
public class TestBlockHasMultipleReplicasOnSameDN {
public static final Log LOG = LogFactory.getLog(TestBlockHasMultipleReplicasOnSameDN.class);
private static final short NUM_DATANODES = 2;
private static final int BLOCK_SIZE = 1024;
private static final long NUM_BLOCKS = 5;
private static final long seed = 0x1BADF00DL;
private Configuration conf;
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private DFSClient client;
private String bpid;
@Before
public void startUpCluster() throws IOException {
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATANODES)
.build();
fs = cluster.getFileSystem();
client = fs.getClient();
bpid = cluster.getNamesystem().getBlockPoolId();
}
@After
public void shutDownCluster() throws IOException {
if (cluster != null) {
fs.close();
cluster.shutdown();
cluster = null;
}
}
private String makeFileName(String prefix) {
return "/" + prefix + ".dat";
}
/**
* Verify NameNode behavior when a given DN reports multiple replicas
* of a given block.
*/
@Test
public void testBlockHasMultipleReplicasOnSameDN() throws IOException {
String filename = makeFileName(GenericTestUtils.getMethodName());
Path filePath = new Path(filename);
// Write out a file with a few blocks.
DFSTestUtil.createFile(fs, filePath, BLOCK_SIZE, BLOCK_SIZE * NUM_BLOCKS,
BLOCK_SIZE, NUM_DATANODES, seed);
// Get the block list for the file with the block locations.
LocatedBlocks locatedBlocks = client.getLocatedBlocks(
filePath.toString(), 0, BLOCK_SIZE * NUM_BLOCKS);
// Generate a fake block report from one of the DataNodes, such
// that it reports one copy of each block on either storage.
DataNode dn = cluster.getDataNodes().get(0);
DatanodeRegistration dnReg = dn.getDNRegistrationForBP(bpid);
    StorageBlockReport[] reports =
        new StorageBlockReport[cluster.getStoragesPerDatanode()];
ArrayList<Replica> blocks = new ArrayList<Replica>();
for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
Block localBlock = locatedBlock.getBlock().getLocalBlock();
blocks.add(new FinalizedReplica(localBlock, null, null));
}
try (FsDatasetSpi.FsVolumeReferences volumes =
dn.getFSDataset().getFsVolumeReferences()) {
BlockListAsLongs bll = BlockListAsLongs.encode(blocks);
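      // Report the identical block list from every storage, so the NN sees
      // each block replicated on multiple storages of the same DN.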
for (int i = 0; i < cluster.getStoragesPerDatanode(); ++i) {
DatanodeStorage dns = new DatanodeStorage(volumes.get(i).getStorageID());
reports[i] = new StorageBlockReport(dns, bll);
}
}
// Should not assert!
cluster.getNameNodeRpc().blockReport(dnReg, bpid, reports,
new BlockReportContext(1, 0, System.nanoTime(), 0L));
// Get the block locations once again.
locatedBlocks = client.getLocatedBlocks(filename, 0, BLOCK_SIZE * NUM_BLOCKS);
// Make sure that each block has two replicas, one on each DataNode.
for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
DatanodeInfo[] locations = locatedBlock.getLocations();
assertThat(locations.length, is((int) NUM_DATANODES));
assertThat(locations[0].getDatanodeUuid(), not(locations[1].getDatanodeUuid()));
}
}
}
| 5,353 | 36.704225 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.junit.Test;
import java.io.File;
import java.util.Random;
import java.util.UUID;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
/**
* Test that BlockPoolSliceStorage can correctly generate trash and
* restore directories for a given block file path.
*/
public class TestBlockPoolSliceStorage {
public static final Log LOG = LogFactory.getLog(TestBlockPoolSliceStorage.class);
final Random rand = new Random();
BlockPoolSliceStorage storage;
/**
* BlockPoolSliceStorage with a dummy storage directory. The directory
* need not exist. We need to extend BlockPoolSliceStorage so we can
* call {@link Storage#addStorageDir}.
*/
private static class StubBlockPoolSliceStorage extends BlockPoolSliceStorage {
StubBlockPoolSliceStorage(int namespaceID, String bpID, long cTime,
String clusterId) {
super(namespaceID, bpID, cTime, clusterId);
addStorageDir(new StorageDirectory(new File("/tmp/dontcare/" + bpID)));
assertThat(storageDirs.size(), is(1));
}
}
private String makeRandomIpAddress() {
return rand.nextInt(256) + "." +
rand.nextInt(256) + "." +
rand.nextInt(256) + "." +
rand.nextInt(256);
}
private String makeRandomBlockpoolId() {
return "BP-" + rand.nextInt(Integer.MAX_VALUE) +
"-" + makeRandomIpAddress() +
"-" + rand.nextInt(Integer.MAX_VALUE);
}
private BlockPoolSliceStorage makeBlockPoolStorage() {
return new StubBlockPoolSliceStorage(
rand.nextInt(Integer.MAX_VALUE),
makeRandomBlockpoolId(),
rand.nextInt(Integer.MAX_VALUE),
UUID.randomUUID().toString());
}
private String makeRandomBlockFileSubdir(int nestingLevel) {
StringBuilder sb = new StringBuilder();
sb.append(File.separator);
for (int i = 0; i < nestingLevel; ++i) {
sb.append("subdir" + rand.nextInt(64) + File.separator);
}
return sb.toString();
}
/**
* Test conversion from a block file path to its target trash
* directory.
*/
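  // Invoked from testGetTrashAndRestoreDirectories() with several nesting
  // levels; deliberately not annotated with @Test.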
public void getTrashDirectoryForBlockFile(String fileName, int nestingLevel) {
final String blockFileSubdir = makeRandomBlockFileSubdir(nestingLevel);
final String blockFileName = fileName;
String testFilePath =
storage.getSingularStorageDir().getRoot() + File.separator +
Storage.STORAGE_DIR_CURRENT +
blockFileSubdir + blockFileName;
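    // The trash location mirrors the block file's subdir structure under
    // the trash root; substring() below drops the trailing path separator.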
String expectedTrashPath =
storage.getSingularStorageDir().getRoot() + File.separator +
BlockPoolSliceStorage.TRASH_ROOT_DIR +
blockFileSubdir.substring(0, blockFileSubdir.length() - 1);
LOG.info("Got subdir " + blockFileSubdir);
LOG.info("Generated file path " + testFilePath);
assertThat(storage.getTrashDirectory(new File(testFilePath)), is(expectedTrashPath));
}
/*
* Test conversion from a block file in a trash directory to its
* target directory for restore.
*/
public void getRestoreDirectoryForBlockFile(String fileName, int nestingLevel) {
BlockPoolSliceStorage storage = makeBlockPoolStorage();
final String blockFileSubdir = makeRandomBlockFileSubdir(nestingLevel);
final String blockFileName = fileName;
String deletedFilePath =
storage.getSingularStorageDir().getRoot() + File.separator +
BlockPoolSliceStorage.TRASH_ROOT_DIR +
blockFileSubdir + blockFileName;
String expectedRestorePath =
storage.getSingularStorageDir().getRoot() + File.separator +
Storage.STORAGE_DIR_CURRENT +
blockFileSubdir.substring(0, blockFileSubdir.length() - 1);
LOG.info("Generated deleted file path " + deletedFilePath);
assertThat(storage.getRestoreDirectory(new File(deletedFilePath)),
is(expectedRestorePath));
}
@Test (timeout=300000)
public void testGetTrashAndRestoreDirectories() {
storage = makeBlockPoolStorage();
// Test a few different nesting levels since block files
// could be nested such as subdir1/subdir5/blk_...
// Make sure all nesting levels are handled correctly.
for (int i = 0; i < 3; ++i) {
getTrashDirectoryForBlockFile("blk_myblockfile", i);
getTrashDirectoryForBlockFile("blk_myblockfile.meta", i);
getRestoreDirectoryForBlockFile("blk_myblockfile", i);
getRestoreDirectoryForBlockFile("blk_myblockfile.meta", i);
}
}
}
| 5,427 | 34.94702 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHdfsServerConstants.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.junit.Test;
/**
 * Test enumerations in HdfsServerConstants.
*/
public class TestHdfsServerConstants {
/**
* Verify that parsing a StartupOption string gives the expected results.
 * If a RollingUpgradeStartupOption is specified then it is also checked.
*
* @param value
* @param expectedOption
* @param expectedRollupOption optional, may be null.
*/
private static void verifyStartupOptionResult(String value,
StartupOption expectedOption,
RollingUpgradeStartupOption expectedRollupOption) {
StartupOption option = StartupOption.getEnum(value);
assertEquals(expectedOption, option);
if (expectedRollupOption != null) {
assertEquals(expectedRollupOption, option.getRollingUpgradeStartupOption());
}
}
/**
* Test that we can parse a StartupOption string without the optional
* RollingUpgradeStartupOption.
*/
@Test
public void testStartupOptionParsing() {
verifyStartupOptionResult("FORMAT", StartupOption.FORMAT, null);
verifyStartupOptionResult("REGULAR", StartupOption.REGULAR, null);
verifyStartupOptionResult("CHECKPOINT", StartupOption.CHECKPOINT, null);
verifyStartupOptionResult("UPGRADE", StartupOption.UPGRADE, null);
verifyStartupOptionResult("ROLLBACK", StartupOption.ROLLBACK, null);
verifyStartupOptionResult("FINALIZE", StartupOption.FINALIZE, null);
verifyStartupOptionResult("ROLLINGUPGRADE", StartupOption.ROLLINGUPGRADE, null);
verifyStartupOptionResult("IMPORT", StartupOption.IMPORT, null);
verifyStartupOptionResult("INITIALIZESHAREDEDITS", StartupOption.INITIALIZESHAREDEDITS, null);
try {
verifyStartupOptionResult("UNKNOWN(UNKNOWNOPTION)", StartupOption.FORMAT, null);
fail("Failed to get expected IllegalArgumentException");
} catch(IllegalArgumentException iae) {
// Expected!
}
}
/**
* Test that we can parse a StartupOption string with a
* RollingUpgradeStartupOption.
*/
@Test
public void testRollingUpgradeStartupOptionParsing() {
verifyStartupOptionResult("ROLLINGUPGRADE(ROLLBACK)",
StartupOption.ROLLINGUPGRADE,
RollingUpgradeStartupOption.ROLLBACK);
verifyStartupOptionResult("ROLLINGUPGRADE(DOWNGRADE)",
StartupOption.ROLLINGUPGRADE,
RollingUpgradeStartupOption.DOWNGRADE);
verifyStartupOptionResult("ROLLINGUPGRADE(STARTED)",
StartupOption.ROLLINGUPGRADE,
RollingUpgradeStartupOption.STARTED);
try {
verifyStartupOptionResult("ROLLINGUPGRADE(UNKNOWNOPTION)", StartupOption.ROLLINGUPGRADE, null);
fail("Failed to get expected IllegalArgumentException");
} catch(IllegalArgumentException iae) {
// Expected!
}
}
}
| 3,922 | 37.841584 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.junit.Test;
/**
* Tests deleteBlockPool functionality.
*/
public class TestDeleteBlockPool {
@Test
public void testDeleteBlockPool() throws Exception {
// Start cluster with a 2 NN and 2 DN
Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
conf.set(DFSConfigKeys.DFS_NAMESERVICES,
"namesServerId1,namesServerId2");
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology
(conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
.numDataNodes(2).build();
cluster.waitActive();
FileSystem fs1 = cluster.getFileSystem(0);
FileSystem fs2 = cluster.getFileSystem(1);
DFSTestUtil.createFile(fs1, new Path("/alpha"), 1024, (short) 2, 54);
DFSTestUtil.createFile(fs2, new Path("/beta"), 1024, (short) 2, 54);
DataNode dn1 = cluster.getDataNodes().get(0);
DataNode dn2 = cluster.getDataNodes().get(1);
String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
File dn2StorageDir1 = cluster.getInstanceStorageDir(1, 0);
File dn2StorageDir2 = cluster.getInstanceStorageDir(1, 1);
      // The BP offer service for bpid1 is still running on dn1, so deletion
      // must fail.
try {
dn1.deleteBlockPool(bpid1, true);
fail("Must not delete a running block pool");
} catch (IOException expected) {
}
Configuration nn1Conf = cluster.getConfiguration(1);
nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId2");
dn1.refreshNamenodes(nn1Conf);
assertEquals(1, dn1.getAllBpOs().size());
try {
dn1.deleteBlockPool(bpid1, false);
fail("Must not delete if any block files exist unless "
+ "force is true");
} catch (IOException expected) {
}
verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
dn1.deleteBlockPool(bpid1, true);
verifyBlockPoolDirectories(false, dn1StorageDir1, bpid1);
verifyBlockPoolDirectories(false, dn1StorageDir2, bpid1);
fs1.delete(new Path("/alpha"), true);
// Wait till all blocks are deleted from the dn2 for bpid1.
File finalDir1 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid1);
File finalDir2 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid2);
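      // Spin until both finalized directories contain no block files.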
while ((!DatanodeUtil.dirNoFilesRecursive(finalDir1)) ||
(!DatanodeUtil.dirNoFilesRecursive(finalDir2))) {
try {
Thread.sleep(3000);
} catch (Exception ignored) {
}
}
cluster.shutdownNameNode(0);
      // Although the namenode is shut down, the BP offer service is still
      // running on dn2.
try {
dn2.deleteBlockPool(bpid1, true);
fail("Must not delete a running block pool");
} catch (IOException expected) {
}
dn2.refreshNamenodes(nn1Conf);
assertEquals(1, dn2.getAllBpOs().size());
verifyBlockPoolDirectories(true, dn2StorageDir1, bpid1);
verifyBlockPoolDirectories(true, dn2StorageDir2, bpid1);
// Now deleteBlockPool must succeed with force as false, because no
// blocks exist for bpid1 and bpOfferService is also stopped for bpid1.
dn2.deleteBlockPool(bpid1, false);
verifyBlockPoolDirectories(false, dn2StorageDir1, bpid1);
verifyBlockPoolDirectories(false, dn2StorageDir2, bpid1);
//bpid2 must not be impacted
verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
verifyBlockPoolDirectories(true, dn2StorageDir1, bpid2);
verifyBlockPoolDirectories(true, dn2StorageDir2, bpid2);
      // Make sure the second block pool is still running fine.
Path gammaFile = new Path("/gamma");
DFSTestUtil.createFile(fs2, gammaFile, 1024, (short) 1, 55);
fs2.setReplication(gammaFile, (short)2);
DFSTestUtil.waitReplication(fs2, gammaFile, (short) 2);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testDfsAdminDeleteBlockPool() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
conf.set(DFSConfigKeys.DFS_NAMESERVICES,
"namesServerId1,namesServerId2");
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
.numDataNodes(1).build();
cluster.waitActive();
FileSystem fs1 = cluster.getFileSystem(0);
FileSystem fs2 = cluster.getFileSystem(1);
DFSTestUtil.createFile(fs1, new Path("/alpha"), 1024, (short) 1, 54);
DFSTestUtil.createFile(fs2, new Path("/beta"), 1024, (short) 1, 54);
DataNode dn1 = cluster.getDataNodes().get(0);
String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
Configuration nn1Conf = cluster.getConfiguration(0);
nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
dn1.refreshNamenodes(nn1Conf);
assertEquals(1, dn1.getAllBpOs().size());
DFSAdmin admin = new DFSAdmin(nn1Conf);
String dn1Address = dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort();
String[] args = { "-deleteBlockPool", dn1Address, bpid2 };
int ret = admin.run(args);
assertFalse(0 == ret);
verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
String[] forceArgs = { "-deleteBlockPool", dn1Address, bpid2, "force" };
ret = admin.run(forceArgs);
assertEquals(0, ret);
verifyBlockPoolDirectories(false, dn1StorageDir1, bpid2);
verifyBlockPoolDirectories(false, dn1StorageDir2, bpid2);
//bpid1 remains good
verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
private void verifyBlockPoolDirectories(boolean shouldExist,
File storageDir, String bpid) throws IOException {
File bpDir = new File(storageDir, DataStorage.STORAGE_DIR_CURRENT + "/"
+ bpid);
if (shouldExist == false) {
assertFalse(bpDir.exists());
} else {
File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
File finalizedDir = new File(bpCurrentDir,
DataStorage.STORAGE_DIR_FINALIZED);
File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
File versionFile = new File(bpCurrentDir, "VERSION");
assertTrue(finalizedDir.isDirectory());
assertTrue(rbwDir.isDirectory());
assertTrue(versionFile.exists());
}
}
}
| 8,717 | 35.78481 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Test the ability of a DN to tolerate volume failures.
*/
public class TestDataNodeVolumeFailureToleration {
private FileSystem fs;
private MiniDFSCluster cluster;
private Configuration conf;
private String dataDir;
// Sleep at least 3 seconds (a 1s heartbeat plus padding) to allow
// for heartbeats to propagate from the datanodes to the namenode.
final int WAIT_FOR_HEARTBEATS = 3000;
// Wait at least (2 * re-check + 10 * heartbeat) seconds for
// a datanode to be considered dead by the namenode.
final int WAIT_FOR_DEATH = 15000;
@Before
public void setUp() throws Exception {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
/*
* Lower the DN heartbeat, DF rate, and recheck interval to one second
* so state about failures and datanode death propagates faster.
*/
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
// Allow a single volume failure (there are two volumes)
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
dataDir = cluster.getDataDirectory();
}
@After
public void tearDown() throws Exception {
cluster.shutdown();
}
/**
* Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
   * option, i.e. the DN tolerates volumes that fail to initialize during
   * its start-up.
*/
@Test
public void testValidVolumesAtStartup() throws Exception {
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
// Make sure no DNs are running.
cluster.shutdownDataNodes();
// Bring up a datanode with two default data dirs, but with one bad one.
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
// We use subdirectories 0 and 1 in order to have only a single
// data dir's parent inject a failure.
File tld = new File(MiniDFSCluster.getBaseDirectory(), "badData");
File dataDir1 = new File(tld, "data1");
File dataDir1Actual = new File(dataDir1, "1");
dataDir1Actual.mkdirs();
// Force an IOE to occur on one of the dfs.data.dir.
File dataDir2 = new File(tld, "data2");
prepareDirToFail(dataDir2);
File dataDir2Actual = new File(dataDir2, "2");
// Start one DN, with manually managed DN dir
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
dataDir1Actual.getPath() + "," + dataDir2Actual.getPath());
cluster.startDataNodes(conf, 1, false, null, null);
cluster.waitActive();
try {
assertTrue("The DN should have started up fine.",
cluster.isDataNodeUp());
DataNode dn = cluster.getDataNodes().get(0);
String si = DataNodeTestUtils.getFSDataset(dn).getStorageInfo();
assertTrue("The DN should have started with this directory",
si.contains(dataDir1Actual.getPath()));
assertFalse("The DN shouldn't have a bad directory.",
si.contains(dataDir2Actual.getPath()));
} finally {
cluster.shutdownDataNodes();
FileUtil.chmod(dataDir2.toString(), "755");
}
}
/**
* Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
   * option, i.e. the DN shuts itself down when the number of volume
   * failures exceeds the tolerated amount.
*/
@Test
public void testConfigureMinValidVolumes() throws Exception {
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
// Bring up two additional datanodes that need both of their volumes
// functioning in order to stay up.
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 0);
cluster.startDataNodes(conf, 2, true, null, null);
cluster.waitActive();
final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
).getDatanodeManager();
long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
// Fail a volume on the 2nd DN
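    // MiniDFSCluster names volumes data1..dataN, two per DN: DN i owns
    // data(2*i+1) and data(2*i+2), so data3 is the second DN's first volume.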
File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
DataNodeTestUtils.injectDataDirFailure(dn2Vol1);
// Should only get two replicas (the first DN and the 3rd)
Path file1 = new Path("/test1");
DFSTestUtil.createFile(fs, file1, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file1, (short)2);
// Check that this single failure caused a DN to die.
DFSTestUtil.waitForDatanodeStatus(dm, 2, 1, 0,
origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
// If we restore the volume we should still only be able to get
// two replicas since the DN is still considered dead.
DataNodeTestUtils.restoreDataDirFromFailure(dn2Vol1);
Path file2 = new Path("/test2");
DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file2, (short)2);
}
/**
* Restart the datanodes with a new volume tolerated value.
* @param volTolerated number of dfs data dir failures to tolerate
* @param manageDfsDirs whether the mini cluster should manage data dirs
* @throws IOException
*/
private void restartDatanodes(int volTolerated, boolean manageDfsDirs)
throws IOException {
// Make sure no datanode is running
cluster.shutdownDataNodes();
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, volTolerated);
cluster.startDataNodes(conf, 1, manageDfsDirs, null, null);
cluster.waitActive();
}
/**
* Test for different combination of volume configs and volumes tolerated
* values.
*/
@Test
public void testVolumeAndTolerableConfiguration() throws Exception {
// Check if Block Pool Service exit for an invalid conf value.
testVolumeConfig(-1, 0, false, true);
// Ditto if the value is too big.
testVolumeConfig(100, 0, false, true);
// Test for one failed volume
testVolumeConfig(0, 1, false, false);
// Test for one failed volume with 1 tolerable volume
testVolumeConfig(1, 1, true, false);
// Test all good volumes
testVolumeConfig(0, 0, true, false);
// Test all failed volumes
testVolumeConfig(0, 2, false, false);
}
/**
* Tests for a given volumes to be tolerated and volumes failed.
*/
private void testVolumeConfig(int volumesTolerated, int volumesFailed,
boolean expectedBPServiceState, boolean manageDfsDirs)
throws IOException, InterruptedException {
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
final int dnIndex = 0;
// Fail the current directory since invalid storage directory perms
// get fixed up automatically on datanode startup.
File[] dirs = {
new File(cluster.getInstanceStorageDir(dnIndex, 0), "current"),
new File(cluster.getInstanceStorageDir(dnIndex, 1), "current") };
try {
for (int i = 0; i < volumesFailed; i++) {
prepareDirToFail(dirs[i]);
}
restartDatanodes(volumesTolerated, manageDfsDirs);
assertEquals(expectedBPServiceState, cluster.getDataNodes().get(0)
.isBPServiceAlive(cluster.getNamesystem().getBlockPoolId()));
} finally {
for (File dir : dirs) {
FileUtil.chmod(dir.toString(), "755");
}
}
}
/**
* Prepare directories for a failure, set dir permission to 000
* @param dir
* @throws IOException
* @throws InterruptedException
*/
private void prepareDirToFail(File dir) throws IOException,
InterruptedException {
dir.mkdirs();
assertEquals("Couldn't chmod local vol", 0,
FileUtil.chmod(dir.toString(), "000"));
}
/**
* Test that a volume that is considered failed on startup is seen as
* a failed volume by the NN.
*/
@Test
public void testFailedVolumeOnStartupIsCounted() throws Exception {
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
).getDatanodeManager();
long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
File dir = new File(cluster.getInstanceStorageDir(0, 0), "current");
try {
prepareDirToFail(dir);
restartDatanodes(1, false);
// The cluster is up..
assertEquals(true, cluster.getDataNodes().get(0)
.isBPServiceAlive(cluster.getNamesystem().getBlockPoolId()));
// but there has been a single volume failure
DFSTestUtil.waitForDatanodeStatus(dm, 1, 0, 1,
origCapacity / 2, WAIT_FOR_HEARTBEATS);
} finally {
FileUtil.chmod(dir.toString(), "755");
}
}
}
| 10,304 | 36.202166 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.VersionInfo;
import org.junit.Before;
import org.junit.Test;
public class TestDatanodeRegister {
public static final Log LOG = LogFactory.getLog(TestDatanodeRegister.class);
// Invalid address
private static final InetSocketAddress INVALID_ADDR =
new InetSocketAddress("127.0.0.1", 1);
private BPServiceActor actor;
NamespaceInfo fakeNsInfo;
DNConf mockDnConf;
@Before
public void setUp() throws IOException {
mockDnConf = mock(DNConf.class);
doReturn(VersionInfo.getVersion()).when(mockDnConf).getMinimumNameNodeVersion();
DataNode mockDN = mock(DataNode.class);
doReturn(true).when(mockDN).shouldRun();
doReturn(mockDnConf).when(mockDN).getDnConf();
BPOfferService mockBPOS = mock(BPOfferService.class);
doReturn(mockDN).when(mockBPOS).getDataNode();
actor = new BPServiceActor(INVALID_ADDR, mockBPOS);
fakeNsInfo = mock(NamespaceInfo.class);
    // Return a good software version.
doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion();
// Return a good layout version for now.
doReturn(HdfsServerConstants.NAMENODE_LAYOUT_VERSION).when(fakeNsInfo)
.getLayoutVersion();
DatanodeProtocolClientSideTranslatorPB fakeDnProt =
mock(DatanodeProtocolClientSideTranslatorPB.class);
when(fakeDnProt.versionRequest()).thenReturn(fakeNsInfo);
actor.setNameNode(fakeDnProt);
}
@Test
public void testSoftwareVersionDifferences() throws Exception {
// We expect no exception to be thrown when the software versions match.
assertEquals(VersionInfo.getVersion(),
actor.retrieveNamespaceInfo().getSoftwareVersion());
// We expect no exception to be thrown when the min NN version is below the
// reported NN version.
doReturn("4.0.0").when(fakeNsInfo).getSoftwareVersion();
doReturn("3.0.0").when(mockDnConf).getMinimumNameNodeVersion();
assertEquals("4.0.0", actor.retrieveNamespaceInfo().getSoftwareVersion());
// When the NN reports a version that's too low, throw an exception.
doReturn("3.0.0").when(fakeNsInfo).getSoftwareVersion();
doReturn("4.0.0").when(mockDnConf).getMinimumNameNodeVersion();
try {
actor.retrieveNamespaceInfo();
fail("Should have thrown an exception for NN with too-low version");
} catch (IncorrectVersionException ive) {
GenericTestUtils.assertExceptionContains(
"The reported NameNode version is too low", ive);
LOG.info("Got expected exception", ive);
}
}
@Test
public void testDifferentLayoutVersions() throws Exception {
// We expect no exceptions to be thrown when the layout versions match.
assertEquals(HdfsServerConstants.NAMENODE_LAYOUT_VERSION,
actor.retrieveNamespaceInfo().getLayoutVersion());
// A mismatched layout version must not cause retrieveNamespaceInfo() to
// fail; the DataNode tolerates NameNodes with a different layout version.
doReturn(HdfsServerConstants.NAMENODE_LAYOUT_VERSION * 1000).when(fakeNsInfo)
.getLayoutVersion();
try {
actor.retrieveNamespaceInfo();
} catch (IOException e) {
fail("Should not fail to retrieve NS info from DN with different layout version");
}
}
}
| 4,786 | 38.561983 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.junit.Test;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
public class TestDataNodeUUID {
/**
* This test makes sure that the DataNode has a valid
* UUID after checkDatanodeUuid() has run.
*/
@Test
public void testDatanodeUuid() throws Exception {
final InetSocketAddress NN_ADDR = new InetSocketAddress(
"localhost", 5020);
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
FileSystem.setDefaultUri(conf,
"hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
ArrayList<StorageLocation> locations = new ArrayList<>();
DataNode dn = new DataNode(conf, locations, null);
// Assert that the DataNode UUID is null before the check
String nullString = null;
assertEquals(dn.getDatanodeUuid(), nullString);
// checkDatanodeUuid() will create a UUID if none has been assigned yet
dn.checkDatanodeUuid();
// Make sure that we have a valid DataNodeUUID at that point of time.
assertNotEquals(dn.getDatanodeUuid(), nullString);
}
}
| 2,329 | 34.30303 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.*;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.*;
import static org.mockito.Mockito.times;
/**
* Tests that the DataNode respects
* {@link DFSConfigKeys#DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY}
*/
public class TestDnRespectsBlockReportSplitThreshold {
public static final Log LOG =
LogFactory.getLog(TestDnRespectsBlockReportSplitThreshold.class);
private static final int BLOCK_SIZE = 1024;
private static final short REPL_FACTOR = 1;
private static final long seed = 0xFEEDFACE;
private static final int BLOCKS_IN_FILE = 5;
private static Configuration conf;
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
static String bpid;
public void startUpCluster(long splitThreshold) throws IOException {
conf = new HdfsConfiguration();
conf.setLong(DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY, splitThreshold);
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPL_FACTOR)
.build();
fs = cluster.getFileSystem();
bpid = cluster.getNamesystem().getBlockPoolId();
}
@After
public void shutDownCluster() throws IOException {
if (cluster != null) {
fs.close();
cluster.shutdown();
cluster = null;
}
}
private void createFile(String filenamePrefix, int blockCount)
throws IOException {
Path path = new Path("/" + filenamePrefix + ".dat");
DFSTestUtil.createFile(fs, path, BLOCK_SIZE,
blockCount * BLOCK_SIZE, BLOCK_SIZE, REPL_FACTOR, seed);
}
private void verifyCapturedArguments(
ArgumentCaptor<StorageBlockReport[]> captor,
int expectedReportsPerCall,
int expectedTotalBlockCount) {
List<StorageBlockReport[]> listOfReports = captor.getAllValues();
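// Sum the reported blocks across all captured blockReport() invocations,
// checking that each call carried exactly expectedReportsPerCall reports.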
int numBlocksReported = 0;
for (StorageBlockReport[] reports : listOfReports) {
assertThat(reports.length, is(expectedReportsPerCall));
for (StorageBlockReport report : reports) {
BlockListAsLongs blockList = report.getBlocks();
numBlocksReported += blockList.getNumberOfBlocks();
}
}
assertTrue(numBlocksReported >= expectedTotalBlockCount);
}
/**
* Test that if splitThreshold is zero, then we always get a separate
* call per storage.
*/
@Test(timeout=300000)
public void testAlwaysSplit() throws IOException, InterruptedException {
startUpCluster(0);
NameNode nn = cluster.getNameNode();
DataNode dn = cluster.getDataNodes().get(0);
// Create a file with a few blocks.
createFile(GenericTestUtils.getMethodName(), BLOCKS_IN_FILE);
// Insert a spy object for the NN RPC.
DatanodeProtocolClientSideTranslatorPB nnSpy =
DataNodeTestUtils.spyOnBposToNN(dn, nn);
// Trigger a block report so there is an interaction with the spy
// object.
DataNodeTestUtils.triggerBlockReport(dn);
ArgumentCaptor<StorageBlockReport[]> captor =
ArgumentCaptor.forClass(StorageBlockReport[].class);
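// With a threshold of zero we expect one blockReport RPC per storage,
// each carrying a single storage report.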
Mockito.verify(nnSpy, times(cluster.getStoragesPerDatanode())).blockReport(
any(DatanodeRegistration.class),
anyString(),
captor.capture(), Mockito.<BlockReportContext>anyObject());
verifyCapturedArguments(captor, 1, BLOCKS_IN_FILE);
}
/**
* Tests the behavior when the count of blocks is exactly one less than
* the threshold.
*/
@Test(timeout=300000)
public void testCornerCaseUnderThreshold() throws IOException, InterruptedException {
startUpCluster(BLOCKS_IN_FILE + 1);
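// The threshold is one above the block count, so the report must not split.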
NameNode nn = cluster.getNameNode();
DataNode dn = cluster.getDataNodes().get(0);
// Create a file with a few blocks.
createFile(GenericTestUtils.getMethodName(), BLOCKS_IN_FILE);
// Insert a spy object for the NN RPC.
DatanodeProtocolClientSideTranslatorPB nnSpy =
DataNodeTestUtils.spyOnBposToNN(dn, nn);
// Trigger a block report so there is an interaction with the spy
// object.
DataNodeTestUtils.triggerBlockReport(dn);
ArgumentCaptor<StorageBlockReport[]> captor =
ArgumentCaptor.forClass(StorageBlockReport[].class);
Mockito.verify(nnSpy, times(1)).blockReport(
any(DatanodeRegistration.class),
anyString(),
captor.capture(), Mockito.<BlockReportContext>anyObject());
verifyCapturedArguments(captor, cluster.getStoragesPerDatanode(), BLOCKS_IN_FILE);
}
/**
* Tests the behavior when the count of blocks is exactly equal to the
* threshold.
*/
@Test(timeout=300000)
public void testCornerCaseAtThreshold() throws IOException, InterruptedException {
startUpCluster(BLOCKS_IN_FILE);
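// The block count reaches the threshold exactly, so the report must split.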
NameNode nn = cluster.getNameNode();
DataNode dn = cluster.getDataNodes().get(0);
// Create a file with a few blocks.
createFile(GenericTestUtils.getMethodName(), BLOCKS_IN_FILE);
// Insert a spy object for the NN RPC.
DatanodeProtocolClientSideTranslatorPB nnSpy =
DataNodeTestUtils.spyOnBposToNN(dn, nn);
// Trigger a block report so there is an interaction with the spy
// object.
DataNodeTestUtils.triggerBlockReport(dn);
ArgumentCaptor<StorageBlockReport[]> captor =
ArgumentCaptor.forClass(StorageBlockReport[].class);
Mockito.verify(nnSpy, times(cluster.getStoragesPerDatanode())).blockReport(
any(DatanodeRegistration.class),
anyString(),
captor.capture(), Mockito.<BlockReportContext>anyObject());
verifyCapturedArguments(captor, 1, BLOCKS_IN_FILE);
}
}
| 7,208 | 33.826087 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataStorage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import java.io.File;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
public class TestDataStorage {
private final static String DEFAULT_BPID = "bp-0";
private final static String CLUSTER_ID = "cluster0";
private final static String BUILD_VERSION = "2.0";
private final static String SOFTWARE_VERSION = "2.0";
private final static long CTIME = 1;
private final static File TEST_DIR =
new File(System.getProperty("test.build.data") + "/dstest");
private final static StartupOption START_OPT = StartupOption.REGULAR;
private DataNode mockDN = Mockito.mock(DataNode.class);
private NamespaceInfo nsInfo;
private DataStorage storage;
@Before
public void setUp() throws IOException {
storage = new DataStorage();
nsInfo = new NamespaceInfo(0, CLUSTER_ID, DEFAULT_BPID, CTIME,
BUILD_VERSION, SOFTWARE_VERSION);
FileUtil.fullyDelete(TEST_DIR);
assertTrue("Failed to make test dir.", TEST_DIR.mkdirs());
}
@After
public void tearDown() throws IOException {
storage.unlockAll();
FileUtil.fullyDelete(TEST_DIR);
}
private static List<StorageLocation> createStorageLocations(int numLocs)
throws IOException {
return createStorageLocations(numLocs, false);
}
/**
* Create a list of StorageLocations.
* If asFile is set to true, create each StorageLocation as a regular file;
* otherwise create a directory for each location.
* @param numLocs the total number of StorageLocations to be created.
* @param asFile set to true to create as file.
* @return a list of StorageLocations.
*/
private static List<StorageLocation> createStorageLocations(
int numLocs, boolean asFile) throws IOException {
List<StorageLocation> locations = new ArrayList<StorageLocation>();
for (int i = 0; i < numLocs; i++) {
String uri = TEST_DIR + "/data" + i;
File file = new File(uri);
if (asFile) {
file.getParentFile().mkdirs();
file.createNewFile();
} else {
file.mkdirs();
}
StorageLocation loc = StorageLocation.parse(uri);
locations.add(loc);
}
return locations;
}
private static List<NamespaceInfo> createNamespaceInfos(int num) {
List<NamespaceInfo> nsInfos = new ArrayList<NamespaceInfo>();
for (int i = 0; i < num; i++) {
String bpid = "bp-" + i;
nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, CTIME, BUILD_VERSION,
SOFTWARE_VERSION));
}
return nsInfos;
}
/** Check whether the path is a valid DataNode data directory. */
private static void checkDir(File dataDir) {
Storage.StorageDirectory sd = new Storage.StorageDirectory(dataDir);
assertTrue(sd.getRoot().isDirectory());
assertTrue(sd.getCurrentDir().isDirectory());
assertTrue(sd.getVersionFile().isFile());
}
/** Check whether the root is a valid BlockPoolSlice storage. */
private static void checkDir(File root, String bpid) {
Storage.StorageDirectory sd = new Storage.StorageDirectory(root);
File bpRoot = new File(sd.getCurrentDir(), bpid);
Storage.StorageDirectory bpSd = new Storage.StorageDirectory(bpRoot);
assertTrue(bpSd.getRoot().isDirectory());
assertTrue(bpSd.getCurrentDir().isDirectory());
assertTrue(bpSd.getVersionFile().isFile());
}
@Test
public void testAddStorageDirectories() throws IOException,
URISyntaxException {
final int numLocations = 3;
final int numNamespace = 3;
List<StorageLocation> locations = createStorageLocations(numLocations);
// Add volumes for multiple namespaces.
List<NamespaceInfo> namespaceInfos = createNamespaceInfos(numNamespace);
for (NamespaceInfo ni : namespaceInfos) {
storage.addStorageLocations(mockDN, ni, locations, START_OPT);
for (StorageLocation sl : locations) {
checkDir(sl.getFile());
checkDir(sl.getFile(), ni.getBlockPoolID());
}
}
assertEquals(numLocations, storage.getNumStorageDirs());
locations = createStorageLocations(numLocations);
List<StorageLocation> addedLocation =
storage.addStorageLocations(mockDN, namespaceInfos.get(0),
locations, START_OPT);
assertTrue(addedLocation.isEmpty());
// The number of active storage dirs has not changed, since the new
// locations point at storage dirs that are already in service.
assertEquals(numLocations, storage.getNumStorageDirs());
// Add more directories.
locations = createStorageLocations(6);
storage.addStorageLocations(mockDN, nsInfo, locations, START_OPT);
assertEquals(6, storage.getNumStorageDirs());
}
@Test
public void testRecoverTransitionReadFailure() throws IOException {
final int numLocations = 3;
List<StorageLocation> locations =
createStorageLocations(numLocations, true);
try {
storage.recoverTransitionRead(mockDN, nsInfo, locations, START_OPT);
fail("An IOException should have been thrown: all StorageLocations are NON_EXISTENT");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains(
"All specified directories are failed to load.", e);
}
assertEquals(0, storage.getNumStorageDirs());
}
/**
* This test enforces the behavior that if doTransition() throws while the
* DN starts up, the storage directories that failed to load do not linger
* in DataStorage.storageDirs(); here every directory fails, so the count
* drops to zero.
*/
@Test
public void testRecoverTransitionReadDoTransitionFailure()
throws IOException {
final int numLocations = 3;
List<StorageLocation> locations = createStorageLocations(numLocations);
// Prepare volumes
storage.recoverTransitionRead(mockDN, nsInfo, locations, START_OPT);
assertEquals(numLocations, storage.getNumStorageDirs());
// Reset DataStorage
storage.unlockAll();
storage = new DataStorage();
// Trigger an exception from doTransition().
nsInfo.clusterID = "cluster1";
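// The cluster ID no longer matches the VERSION files written above, so
// doTransition() fails for every directory.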
try {
storage.recoverTransitionRead(mockDN, nsInfo, locations, START_OPT);
fail("Expect to throw an exception from doTransition()");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("All specified directories", e);
}
assertEquals(0, storage.getNumStorageDirs());
}
}
| 7,684 | 35.947115 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.*;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.Time;
import org.junit.Test;
/**
* This class tests if block replacement request to data nodes work correctly.
*/
public class TestBlockReplacement {
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestBlockReplacement");
MiniDFSCluster cluster;
@Test
public void testThrottler() throws IOException {
Configuration conf = new HdfsConfiguration();
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
long bandwidthPerSec = 1024*1024L;
final long TOTAL_BYTES = 6*bandwidthPerSec;
long bytesToSend = TOTAL_BYTES;
long start = Time.monotonicNow();
DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec);
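// throttle(n) blocks just long enough to keep the cumulative transfer rate
// at or below bandwidthPerSec.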
long bytesSent = 1024*512L; // 0.5MB
throttler.throttle(bytesSent);
bytesToSend -= bytesSent;
bytesSent = 1024*768L; // 0.75MB
throttler.throttle(bytesSent);
bytesToSend -= bytesSent;
try {
Thread.sleep(1000);
} catch (InterruptedException ignored) {}
throttler.throttle(bytesToSend);
long end = Time.monotonicNow();
assertTrue(TOTAL_BYTES*1000/(end-start) <= bandwidthPerSec);
}
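// A minimal usage sketch (illustrative only; readChunk and send are
// hypothetical helpers): a sender loop drives the throttler once per chunk.
//   DataTransferThrottler t = new DataTransferThrottler(1024 * 1024); // 1 MB/s
//   for (int n; (n = readChunk(buf)) > 0; ) { send(buf, n); t.throttle(n); }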
@Test
public void testBlockReplacement() throws Exception {
final Configuration CONF = new HdfsConfiguration();
final String[] INITIAL_RACKS = {"/RACK0", "/RACK1", "/RACK2"};
final String[] NEW_RACKS = {"/RACK2"};
final short REPLICATION_FACTOR = (short)3;
final int DEFAULT_BLOCK_SIZE = 1024;
final Random r = new Random();
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE/2);
CONF.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,500);
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR)
.racks(INITIAL_RACKS).build();
try {
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
Path fileName = new Path("/tmp.txt");
// create a file with one block
DFSTestUtil.createFile(fs, fileName,
DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, r.nextLong());
DFSTestUtil.waitReplication(fs,fileName, REPLICATION_FACTOR);
// get all datanodes
InetSocketAddress addr = new InetSocketAddress("localhost",
cluster.getNameNodePort());
DFSClient client = new DFSClient(addr, CONF);
List<LocatedBlock> locatedBlocks = client.getNamenode().
getBlockLocations("/tmp.txt", 0, DEFAULT_BLOCK_SIZE).getLocatedBlocks();
assertEquals(1, locatedBlocks.size());
LocatedBlock block = locatedBlocks.get(0);
DatanodeInfo[] oldNodes = block.getLocations();
assertEquals(3, oldNodes.length);
ExtendedBlock b = block.getBlock();
// add a fourth datanode to the cluster
cluster.startDataNodes(CONF, 1, true, null, NEW_RACKS);
cluster.waitActive();
DatanodeInfo[] datanodes = client.datanodeReport(DatanodeReportType.ALL);
// find out the new node
DatanodeInfo newNode=null;
for(DatanodeInfo node:datanodes) {
boolean isNewNode = true;
for(DatanodeInfo oldNode:oldNodes) {
if(node.equals(oldNode)) {
isNewNode = false;
break;
}
}
if(isNewNode) {
newNode = node;
break;
}
}
assertNotNull(newNode);
DatanodeInfo source=null;
ArrayList<DatanodeInfo> proxies = new ArrayList<DatanodeInfo>(2);
for(DatanodeInfo node:datanodes) {
if(node != newNode) {
if( node.getNetworkLocation().equals(newNode.getNetworkLocation())) {
source = node;
} else {
proxies.add( node );
}
}
}
//current state: the newNode is on RACK2, and "source" is the other dn on RACK2.
//the two datanodes on RACK0 and RACK1 are in "proxies".
//"source" and both "proxies" all contain the block, while newNode doesn't yet.
assertTrue(source!=null && proxies.size()==2);
// start to replace the block
// case 1: proxySource does not contain the block
LOG.info("Testcase 1: Proxy " + newNode
+ " does not contain the block " + b);
assertFalse(replaceBlock(b, source, newNode, proxies.get(0)));
// case 2: destination already contains the block
LOG.info("Testcase 2: Destination " + proxies.get(1)
+ " contains the block " + b);
assertFalse(replaceBlock(b, source, proxies.get(0), proxies.get(1)));
// case 3: correct case
LOG.info("Testcase 3: Source=" + source + " Proxy=" +
proxies.get(0) + " Destination=" + newNode );
assertTrue(replaceBlock(b, source, proxies.get(0), newNode));
// after cluster has time to resolve the over-replication,
// block locations should contain two proxies and newNode
// but not source
checkBlocks(new DatanodeInfo[]{newNode, proxies.get(0), proxies.get(1)},
fileName.toString(),
DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, client);
// case 4: proxies.get(0) is not a valid del hint
// expect either source or newNode replica to be deleted instead
LOG.info("Testcase 4: invalid del hint " + proxies.get(0) );
assertTrue(replaceBlock(b, proxies.get(0), proxies.get(1), source));
// after cluster has time to resolve the over-replication,
// block locations should contain two proxies,
// and either source or newNode, but not both.
checkBlocks(proxies.toArray(new DatanodeInfo[proxies.size()]),
fileName.toString(),
DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, client);
} finally {
cluster.shutdown();
}
}
@Test
public void testBlockMoveAcrossStorageInSameNode() throws Exception {
final Configuration conf = new HdfsConfiguration();
// create only one datanode in the cluster to verify movement within
// datanode.
final MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).storageTypes(
new StorageType[] { StorageType.DISK, StorageType.ARCHIVE })
.build();
try {
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final Path file = new Path("/testBlockMoveAcrossStorageInSameNode/file");
DFSTestUtil.createFile(dfs, file, 1024, (short) 1, 1024);
LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file.toString(), 0);
// get the current block, its locations and storage types
LocatedBlock locatedBlock = locatedBlocks.get(0);
ExtendedBlock block = locatedBlock.getBlock();
DatanodeInfo[] locations = locatedBlock.getLocations();
assertEquals(1, locations.length);
StorageType[] storageTypes = locatedBlock.getStorageTypes();
// current block should be written to DISK
assertTrue(storageTypes[0] == StorageType.DISK);
DatanodeInfo source = locations[0];
// move block to ARCHIVE by using same DataNodeInfo for source, proxy and
// destination so that movement happens within datanode
assertTrue(replaceBlock(block, source, source, source,
StorageType.ARCHIVE));
// wait till the namenode has been notified of the move
Thread.sleep(3000);
locatedBlocks = dfs.getClient().getLocatedBlocks(file.toString(), 0);
// re-fetch the block locations after the move
locatedBlock = locatedBlocks.get(0);
assertEquals("Storage should be only one", 1,
locatedBlock.getLocations().length);
assertTrue("Block should be moved to ARCHIVE", locatedBlock
.getStorageTypes()[0] == StorageType.ARCHIVE);
} finally {
cluster.shutdown();
}
}
/* check if file's blocks have expected number of replicas,
* and exist at all of includeNodes
*/
private void checkBlocks(DatanodeInfo[] includeNodes, String fileName,
long fileLen, short replFactor, DFSClient client)
throws IOException, TimeoutException {
boolean notDone;
final long TIMEOUT = 20000L;
long starttime = Time.monotonicNow();
long failtime = starttime + TIMEOUT;
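// Poll every 100 ms until the block reaches the expected replication on all
// of the expected nodes, or until the timeout expires.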
do {
try {
Thread.sleep(100);
} catch(InterruptedException e) {
}
List<LocatedBlock> blocks = client.getNamenode().
getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();
assertEquals(1, blocks.size());
DatanodeInfo[] nodes = blocks.get(0).getLocations();
notDone = (nodes.length != replFactor);
if (notDone) {
LOG.info("Expected replication factor is " + replFactor +
" but the real replication factor is " + nodes.length );
} else {
List<DatanodeInfo> nodeLocations = Arrays.asList(nodes);
for (DatanodeInfo node : includeNodes) {
if (!nodeLocations.contains(node) ) {
notDone=true;
LOG.info("Block is not located at " + node );
break;
}
}
}
if (Time.monotonicNow() > failtime) {
String expectedNodesList = "";
String currentNodesList = "";
for (DatanodeInfo dn : includeNodes)
expectedNodesList += dn + ", ";
for (DatanodeInfo dn : nodes)
currentNodesList += dn + ", ";
LOG.info("Expected replica nodes are: " + expectedNodesList);
LOG.info("Current actual replica nodes are: " + currentNodesList);
throw new TimeoutException(
"Did not achieve expected replication to expected nodes "
+ "after more than " + TIMEOUT + " msec. See logs for details.");
}
} while(notDone);
LOG.info("Achieved expected replication values in "
+ (Time.monotonicNow() - starttime) + " msec.");
}
/* Copy a block from sourceProxy to destination. If the block becomes
* over-replicated, preferably remove it from source.
*
* Return true if a block is successfully copied; otherwise false.
*/
private boolean replaceBlock( ExtendedBlock block, DatanodeInfo source,
DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
return replaceBlock(block, source, sourceProxy, destination,
StorageType.DEFAULT);
}
/*
* Replace block
*/
private boolean replaceBlock(
ExtendedBlock block,
DatanodeInfo source,
DatanodeInfo sourceProxy,
DatanodeInfo destination,
StorageType targetStorageType) throws IOException, SocketException {
Socket sock = new Socket();
try {
sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
HdfsServerConstants.READ_TIMEOUT);
sock.setKeepAlive(true);
// sendRequest
DataOutputStream out = new DataOutputStream(sock.getOutputStream());
new Sender(out).replaceBlock(block, targetStorageType,
BlockTokenSecretManager.DUMMY_TOKEN, source.getDatanodeUuid(),
sourceProxy);
out.flush();
// receiveResponse
DataInputStream reply = new DataInputStream(sock.getInputStream());
BlockOpResponseProto proto =
BlockOpResponseProto.parseDelimitedFrom(reply);
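// The DataNode may stream IN_PROGRESS keep-alive responses while the copy
// is running; loop until a terminal status arrives.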
while (proto.getStatus() == Status.IN_PROGRESS) {
proto = BlockOpResponseProto.parseDelimitedFrom(reply);
}
return proto.getStatus() == Status.SUCCESS;
} finally {
sock.close();
}
}
/**
* The standby namenode must not queue a delete-block request while the
* corresponding add-block request sits in edit log segments it has yet to read.
* @throws Exception
*/
@Test
public void testDeletedBlockWhenAddBlockIsInEdit() throws Exception {
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.numDataNodes(1).build();
DFSClient client = null;
try {
cluster.waitActive();
assertEquals("Number of namenodes is not 2", 2,
cluster.getNumNameNodes());
// Transitioning the namenode 0 to active.
cluster.transitionToActive(0);
assertTrue("Namenode 0 should be in active state",
cluster.getNameNode(0).isActiveState());
assertTrue("Namenode 1 should be in standby state",
cluster.getNameNode(1).isStandbyState());
// Trigger a heartbeat to set DatanodeStorageInfo#heartbeatedSinceFailover
// to true.
DataNodeTestUtils.triggerHeartbeat(cluster.getDataNodes().get(0));
FileSystem fs = cluster.getFileSystem(0);
// Trigger a full block report to set DatanodeStorageInfo#blockContentsStale
// to false.
cluster.getDataNodes().get(0).triggerBlockReport(
new BlockReportOptions.Factory().setIncremental(false).build());
Path fileName = new Path("/tmp.txt");
// create a file with one block
DFSTestUtil.createFile(fs, fileName, 10L, (short)1, 1234L);
DFSTestUtil.waitReplication(fs,fileName, (short)1);
client = new DFSClient(cluster.getFileSystem(0).getUri(), conf);
List<LocatedBlock> locatedBlocks = client.getNamenode().
getBlockLocations("/tmp.txt", 0, 10L).getLocatedBlocks();
assertTrue(locatedBlocks.size() == 1);
assertTrue(locatedBlocks.get(0).getLocations().length == 1);
// add a second datanode to the cluster
cluster.startDataNodes(conf, 1, true, null, null, null, null);
assertEquals("Number of datanodes should be 2", 2,
cluster.getDataNodes().size());
DataNode dn0 = cluster.getDataNodes().get(0);
DataNode dn1 = cluster.getDataNodes().get(1);
String activeNNBPId = cluster.getNamesystem(0).getBlockPoolId();
DatanodeDescriptor sourceDnDesc = NameNodeAdapter.getDatanode(
cluster.getNamesystem(0), dn0.getDNRegistrationForBP(activeNNBPId));
DatanodeDescriptor destDnDesc = NameNodeAdapter.getDatanode(
cluster.getNamesystem(0), dn1.getDNRegistrationForBP(activeNNBPId));
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
LOG.info("replaceBlock: " + replaceBlock(block,
(DatanodeInfo)sourceDnDesc, (DatanodeInfo)sourceDnDesc,
(DatanodeInfo)destDnDesc));
// Wait for the FsDatasetAsyncDiskService to delete the block
Thread.sleep(3000);
// Trigger an incremental block report to report the deleted block to the
// namenode
cluster.getDataNodes().get(0).triggerBlockReport(
new BlockReportOptions.Factory().setIncremental(true).build());
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertTrue("Namenode 1 should be in active state",
cluster.getNameNode(1).isActiveState());
assertTrue("Namenode 0 should be in standby state",
cluster.getNameNode(0).isStandbyState());
client.close();
// Opening a new client for new active namenode
client = new DFSClient(cluster.getFileSystem(1).getUri(), conf);
List<LocatedBlock> locatedBlocks1 = client.getNamenode()
.getBlockLocations("/tmp.txt", 0, 10L).getLocatedBlocks();
assertEquals(1, locatedBlocks1.size());
assertEquals("The block should be only on 1 datanode ", 1,
locatedBlocks1.get(0).getLocations().length);
} finally {
IOUtils.cleanup(null, client);
cluster.shutdown();
}
}
/**
* @param args
*/
public static void main(String[] args) throws Exception {
(new TestBlockReplacement()).testBlockReplacement();
}
}
| 18,248 | 39.553333 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.FileDescriptor;
import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
import org.apache.hadoop.io.nativeio.NativeIOException;
import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_DONTNEED;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestCachingStrategy {
private static final Log LOG = LogFactory.getLog(TestCachingStrategy.class);
private static final int MAX_TEST_FILE_LEN = 1024 * 1024;
private static final int WRITE_PACKET_SIZE = DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
private final static TestRecordingCacheTracker tracker =
new TestRecordingCacheTracker();
@BeforeClass
public static void setupTest() {
EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
// Track calls to posix_fadvise.
NativeIO.POSIX.setCacheManipulator(tracker);
// Normally, we wait for a few megabytes of data to be read or written
// before dropping the cache. This is to avoid an excessive number of
// JNI calls to the posix_fadvise function. However, for the purpose
// of this test, we want to use small files and see all fadvise calls
// happen.
BlockSender.CACHE_DROP_INTERVAL_BYTES = 4096;
BlockReceiver.CACHE_DROP_LAG_BYTES = 4096;
}
private static class Stats {
private final String fileName;
private final boolean dropped[] = new boolean[MAX_TEST_FILE_LEN];
Stats(String fileName) {
this.fileName = fileName;
}
synchronized void fadvise(int offset, int len, int flags) {
LOG.debug("got fadvise(offset=" + offset + ", len=" + len +
",flags=" + flags + ")");
if (flags == POSIX_FADV_DONTNEED) {
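// Record every byte in the advised range as dropped from the page cache.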
for (int i = 0; i < len; i++) {
dropped[(offset + i)] = true;
}
}
}
synchronized void assertNotDroppedInRange(int start, int end) {
for (int i = start; i < end; i++) {
if (dropped[i]) {
throw new RuntimeException("in file " + fileName + ", we " +
"dropped the cache at offset " + i);
}
}
}
synchronized void assertDroppedInRange(int start, int end) {
for (int i = start; i < end; i++) {
if (!dropped[i]) {
throw new RuntimeException("in file " + fileName + ", we " +
"did not drop the cache at offset " + i);
}
}
}
synchronized void clear() {
Arrays.fill(dropped, false);
}
}
private static class TestRecordingCacheTracker extends CacheManipulator {
private final Map<String, Stats> map = new TreeMap<String, Stats>();
@Override
public void posixFadviseIfPossible(String name,
FileDescriptor fd, long offset, long len, int flags)
throws NativeIOException {
if ((len < 0) || (len > Integer.MAX_VALUE)) {
throw new RuntimeException("invalid length of " + len +
" passed to posixFadviseIfPossible");
}
if ((offset < 0) || (offset > Integer.MAX_VALUE)) {
throw new RuntimeException("invalid offset of " + offset +
" passed to posixFadviseIfPossible");
}
Stats stats = map.get(name);
if (stats == null) {
stats = new Stats(name);
map.put(name, stats);
}
stats.fadvise((int)offset, (int)len, flags);
super.posixFadviseIfPossible(name, fd, offset, len, flags);
}
synchronized void clear() {
map.clear();
}
synchronized Stats getStats(String fileName) {
return map.get(fileName);
}
synchronized public String toString() {
StringBuilder bld = new StringBuilder();
bld.append("TestRecordingCacheTracker{");
String prefix = "";
for (String fileName : map.keySet()) {
bld.append(prefix);
prefix = ", ";
bld.append(fileName);
}
bld.append("}");
return bld.toString();
}
}
static void createHdfsFile(FileSystem fs, Path p, long length,
Boolean dropBehind) throws Exception {
FSDataOutputStream fos = null;
try {
// create file with replication factor of 1
fos = fs.create(p, (short)1);
if (dropBehind != null) {
fos.setDropBehind(dropBehind);
}
byte buf[] = new byte[8196];
while (length > 0) {
int amt = (length > buf.length) ? buf.length : (int)length;
fos.write(buf, 0, amt);
length -= amt;
}
} catch (IOException e) {
LOG.error("ioexception", e);
} finally {
if (fos != null) {
fos.close();
}
}
}
static long readHdfsFile(FileSystem fs, Path p, long length,
Boolean dropBehind) throws Exception {
FSDataInputStream fis = null;
long totalRead = 0;
try {
fis = fs.open(p);
if (dropBehind != null) {
fis.setDropBehind(dropBehind);
}
byte buf[] = new byte[8196];
while (length > 0) {
int amt = (length > buf.length) ? buf.length : (int)length;
int ret = fis.read(buf, 0, amt);
if (ret == -1) {
return totalRead;
}
totalRead += ret;
length -= ret;
}
} catch (IOException e) {
LOG.error("ioexception", e);
} finally {
if (fis != null) {
fis.close();
}
}
throw new RuntimeException("unreachable");
}
@Test(timeout=120000)
public void testFadviseAfterWriteThenRead() throws Exception {
// start a cluster
LOG.info("testFadviseAfterWriteThenRead");
tracker.clear();
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
String TEST_PATH = "/test";
int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
// create new file
createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, true);
// verify that we dropped everything from the cache during file creation.
ExtendedBlock block = cluster.getNameNode().getRpcServer().getBlockLocations(
TEST_PATH, 0, Long.MAX_VALUE).get(0).getBlock();
String fadvisedFileName = cluster.getBlockFile(0, block).getName();
Stats stats = tracker.getStats(fadvisedFileName);
stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
stats.clear();
// read file
readHdfsFile(fs, new Path(TEST_PATH), Long.MAX_VALUE, true);
// verify that we dropped everything from the cache.
Assert.assertNotNull(stats);
stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test the scenario where the DataNode defaults to not dropping the cache,
* but our client defaults are set.
*/
@Test(timeout=120000)
public void testClientDefaults() throws Exception {
// start a cluster
LOG.info("testClientDefaults");
tracker.clear();
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY, false);
conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY, false);
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS, true);
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, true);
MiniDFSCluster cluster = null;
String TEST_PATH = "/test";
int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
// create new file
createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, null);
// verify that we dropped everything from the cache during file creation.
ExtendedBlock block = cluster.getNameNode().getRpcServer().getBlockLocations(
TEST_PATH, 0, Long.MAX_VALUE).get(0).getBlock();
String fadvisedFileName = cluster.getBlockFile(0, block).getName();
Stats stats = tracker.getStats(fadvisedFileName);
stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
stats.clear();
// read file
readHdfsFile(fs, new Path(TEST_PATH), Long.MAX_VALUE, null);
// verify that we dropped everything from the cache.
Assert.assertNotNull(stats);
stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test(timeout=120000)
public void testFadviseSkippedForSmallReads() throws Exception {
// start a cluster
LOG.info("testFadviseSkippedForSmallReads");
tracker.clear();
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY, true);
MiniDFSCluster cluster = null;
String TEST_PATH = "/test";
int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
FSDataInputStream fis = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
// create new file
createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, null);
// Since the DataNode was configured with drop-behind, and we didn't
// specify any policy, we should have done drop-behind.
ExtendedBlock block = cluster.getNameNode().getRpcServer().getBlockLocations(
TEST_PATH, 0, Long.MAX_VALUE).get(0).getBlock();
String fadvisedFileName = cluster.getBlockFile(0, block).getName();
Stats stats = tracker.getStats(fadvisedFileName);
stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
stats.clear();
stats.assertNotDroppedInRange(0, TEST_PATH_LEN);
// read file
fis = fs.open(new Path(TEST_PATH));
byte buf[] = new byte[17];
fis.readFully(4096, buf, 0, buf.length);
// we should not have dropped anything because of the small read.
stats = tracker.getStats(fadvisedFileName);
stats.assertNotDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
} finally {
IOUtils.cleanup(null, fis);
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test(timeout=120000)
public void testNoFadviseAfterWriteThenRead() throws Exception {
// start a cluster
LOG.info("testNoFadviseAfterWriteThenRead");
tracker.clear();
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
String TEST_PATH = "/test";
int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
// create new file
createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, false);
// verify that we did not drop everything from the cache during file creation.
ExtendedBlock block = cluster.getNameNode().getRpcServer().getBlockLocations(
TEST_PATH, 0, Long.MAX_VALUE).get(0).getBlock();
String fadvisedFileName = cluster.getBlockFile(0, block).getName();
Stats stats = tracker.getStats(fadvisedFileName);
Assert.assertNull(stats);
// read file
readHdfsFile(fs, new Path(TEST_PATH), Long.MAX_VALUE, false);
// verify that we still did not drop anything from the cache.
Assert.assertNull(stats);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test(timeout=120000)
public void testSeekAfterSetDropBehind() throws Exception {
// start a cluster
LOG.info("testSeekAfterSetDropBehind");
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
String TEST_PATH = "/test";
int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, false);
// verify that we can seek after setDropBehind
FSDataInputStream fis = fs.open(new Path(TEST_PATH));
try {
Assert.assertTrue(fis.read() != -1); // create BlockReader
fis.setDropBehind(false); // clear BlockReader
fis.seek(2); // seek
} finally {
fis.close();
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 14,291 | 34.20197 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeExit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
/**
* Tests if DataNode process exits if all Block Pool services exit.
*/
public class TestDataNodeExit {
private static final long WAIT_TIME_IN_MILLIS = 10;
Configuration conf;
MiniDFSCluster cluster = null;
@Before
public void setUp() throws IOException {
conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 100);
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
.build();
for (int i = 0; i < 3; i++) {
cluster.waitActive(i);
}
}
@After
public void tearDown() throws Exception {
if (cluster != null)
cluster.shutdown();
}
private void stopBPServiceThreads(int numStopThreads, DataNode dn)
throws Exception {
List<BPOfferService> bpoList = dn.getAllBpOs();
int expected = dn.getBpOsCount() - numStopThreads;
int index = numStopThreads - 1;
while (index >= 0) {
bpoList.get(index--).stop();
}
int iterations = 3000; // Total 30 seconds MAX wait time
while(dn.getBpOsCount() != expected && iterations > 0) {
Thread.sleep(WAIT_TIME_IN_MILLIS);
iterations--;
}
assertEquals("Mismatch in number of BPServices running", expected,
dn.getBpOsCount());
}
/**
* Test BPService Thread Exit
*/
@Test
public void testBPServiceExit() throws Exception {
DataNode dn = cluster.getDataNodes().get(0);
stopBPServiceThreads(1, dn);
assertTrue("DataNode should not exit", dn.isDatanodeUp());
stopBPServiceThreads(2, dn);
assertFalse("DataNode should exit", dn.isDatanodeUp());
}
@Test
public void testSendOOBToPeers() throws Exception {
DataNode dn = cluster.getDataNodes().get(0);
DataXceiverServer spyXserver = Mockito.spy(dn.getXferServer());
NullPointerException e = new NullPointerException();
Mockito.doThrow(e).when(spyXserver).sendOOBToPeers();
dn.xserver = spyXserver;
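// shutdown() must complete cleanly even though sendOOBToPeers() now throws.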
try {
dn.shutdown();
} catch (Throwable t) {
fail("DataNode shutdown should not have thrown exception " + t);
}
}
}
| 3,566 | 31.724771 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.junit.Assert;
import org.junit.Test;
import java.io.IOException;
public class TestDataNodeECN {
@Test
public void testECNFlag() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED, true);
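// With ECN enabled, the DataNode should report an ECN state other than
// DISABLED in its pipeline ACKs.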
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
PipelineAck.ECN ecn = cluster.getDataNodes().get(0).getECN();
Assert.assertNotEquals(PipelineAck.ECN.DISABLED, ecn);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 1,677 | 35.478261 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.*;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.junit.Test;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.datanode.DataNode.DataNodeDiskChecker;
public class TestDataDirs {
@Test (timeout = 30000)
public void testDataDirParsing() throws Throwable {
Configuration conf = new Configuration();
List<StorageLocation> locations;
File dir0 = new File("/dir0");
File dir1 = new File("/dir1");
File dir2 = new File("/dir2");
File dir3 = new File("/dir3");
File dir4 = new File("/dir4");
// Verify that a valid string is correctly parsed, and that storage
// type is not case-sensitive
String locations1 = "[disk]/dir0,[DISK]/dir1,[sSd]/dir2,[disK]/dir3,[ram_disk]/dir4";
conf.set(DFS_DATANODE_DATA_DIR_KEY, locations1);
locations = DataNode.getStorageLocations(conf);
assertThat(locations.size(), is(5));
assertThat(locations.get(0).getStorageType(), is(StorageType.DISK));
assertThat(locations.get(0).getUri(), is(dir0.toURI()));
assertThat(locations.get(1).getStorageType(), is(StorageType.DISK));
assertThat(locations.get(1).getUri(), is(dir1.toURI()));
assertThat(locations.get(2).getStorageType(), is(StorageType.SSD));
assertThat(locations.get(2).getUri(), is(dir2.toURI()));
assertThat(locations.get(3).getStorageType(), is(StorageType.DISK));
assertThat(locations.get(3).getUri(), is(dir3.toURI()));
assertThat(locations.get(4).getStorageType(), is(StorageType.RAM_DISK));
assertThat(locations.get(4).getUri(), is(dir4.toURI()));
// Verify that an unrecognized storage type results in an exception.
String locations2 = "[BadMediaType]/dir0,[ssd]/dir1,[disk]/dir2";
conf.set(DFS_DATANODE_DATA_DIR_KEY, locations2);
try {
locations = DataNode.getStorageLocations(conf);
fail();
} catch(IllegalArgumentException iae) {
DataNode.LOG.info("The exception is expected.", iae);
}
// Assert that a string with no storage type specified is
// correctly parsed and the default storage type is picked up.
String locations3 = "/dir0,/dir1";
conf.set(DFS_DATANODE_DATA_DIR_KEY, locations3);
locations = DataNode.getStorageLocations(conf);
assertThat(locations.size(), is(2));
assertThat(locations.get(0).getStorageType(), is(StorageType.DISK));
assertThat(locations.get(0).getUri(), is(dir0.toURI()));
assertThat(locations.get(1).getStorageType(), is(StorageType.DISK));
assertThat(locations.get(1).getUri(), is(dir1.toURI()));
}
@Test (timeout = 30000)
public void testDataDirValidation() throws Throwable {
DataNodeDiskChecker diskChecker = mock(DataNodeDiskChecker.class);
doThrow(new IOException()).doThrow(new IOException()).doNothing()
.when(diskChecker).checkDir(any(LocalFileSystem.class), any(Path.class));
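// The first two checkDir() calls fail, so only the third location survives.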
LocalFileSystem fs = mock(LocalFileSystem.class);
AbstractList<StorageLocation> locations = new ArrayList<StorageLocation>();
locations.add(StorageLocation.parse("file:/p1/"));
locations.add(StorageLocation.parse("file:/p2/"));
locations.add(StorageLocation.parse("file:/p3/"));
List<StorageLocation> checkedLocations =
DataNode.checkStorageLocations(locations, fs, diskChecker);
assertEquals("number of valid data dirs", 1, checkedLocations.size());
String validDir = checkedLocations.iterator().next().getFile().getPath();
assertThat("p3 should be valid", new File("/p3/").getPath(), is(validDir));
}
}
| 4,673 | 42.277778 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeStartupOptions.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.junit.Before;
import org.junit.Test;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
/**
* This test verifies DataNode command line processing.
*/
public class TestDatanodeStartupOptions {
private Configuration conf = null;
/**
* Process the given arg list as command line arguments to the DataNode
* to make sure we get the expected result. If the expected result is
* success then further validate that the parsed startup option is the
* same as what was expected.
*
 * @param expectSuccess whether parsing is expected to succeed
 * @param expectedOption the startup option expected after a successful parse
 * @param conf the configuration the parsed option is stored in
 * @param arg the command-line arguments to parse
*/
private static void checkExpected(boolean expectSuccess,
StartupOption expectedOption,
Configuration conf,
String ... arg) {
    String[] args = arg.clone();
boolean returnValue = DataNode.parseArguments(args, conf);
StartupOption option = DataNode.getStartupOption(conf);
assertThat(returnValue, is(expectSuccess));
if (expectSuccess) {
assertThat(option, is(expectedOption));
}
}
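// For example, checkExpected(true, StartupOption.ROLLBACK, conf, "-rollback")
// simulates starting the DataNode as "hdfs datanode -rollback":
// parseArguments() stores the parsed StartupOption in the Configuration,
// and getStartupOption() reads it back for verification.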
/**
* Reinitialize configuration before every test since DN stores the
* parsed StartupOption in the configuration.
*/
@Before
public void initConfiguration() {
conf = new HdfsConfiguration();
}
/**
* A few options that should all parse successfully.
*/
@Test (timeout=60000)
public void testStartupSuccess() {
checkExpected(true, StartupOption.REGULAR, conf);
checkExpected(true, StartupOption.REGULAR, conf, "-regular");
checkExpected(true, StartupOption.REGULAR, conf, "-REGULAR");
checkExpected(true, StartupOption.ROLLBACK, conf, "-rollback");
}
/**
* A few options that should all fail to parse.
*/
@Test (timeout=60000)
public void testStartupFailure() {
checkExpected(false, StartupOption.REGULAR, conf, "unknownoption");
checkExpected(false, StartupOption.REGULAR, conf, "-regular -rollback");
}
}
| 3,165 | 31.639175 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
/**
* This is the base class for simulating a variety of situations
* when blocks are being intentionally corrupted, unexpectedly modified,
 * and so on before a block report happens.
*
* By overriding {@link #sendBlockReports}, derived classes can test
* different variations of how block reports are split across storages
* and messages.
*/
public abstract class BlockReportTestBase {
public static final Log LOG = LogFactory.getLog(BlockReportTestBase.class);
private static short REPL_FACTOR = 1;
private static final int RAND_LIMIT = 2000;
private static final long DN_RESCAN_INTERVAL = 1;
private static final long DN_RESCAN_EXTRA_WAIT = 3 * DN_RESCAN_INTERVAL;
private static final int DN_N0 = 0;
private static final int FILE_START = 0;
private static final int BLOCK_SIZE = 1024;
private static final int NUM_BLOCKS = 10;
private static final int FILE_SIZE = NUM_BLOCKS * BLOCK_SIZE + 1;
protected MiniDFSCluster cluster;
private DistributedFileSystem fs;
private static final Random rand = new Random(RAND_LIMIT);
private static Configuration conf;
static {
initLoggers();
resetConfiguration();
}
@Before
public void startUpCluster() throws IOException {
REPL_FACTOR = 1; // Reset in case a test has modified the value
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
fs = cluster.getFileSystem();
}
@After
public void shutDownCluster() throws IOException {
fs.close();
cluster.shutdownDataNodes();
cluster.shutdown();
}
protected static void resetConfiguration() {
conf = new Configuration();
int customPerChecksumSize = 512;
int customBlockSize = customPerChecksumSize * 3;
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, DN_RESCAN_INTERVAL);
}
// Generate a block report, optionally corrupting the generation
// stamp and/or length of one block.
private static StorageBlockReport[] getBlockReports(
DataNode dn, String bpid, boolean corruptOneBlockGs,
boolean corruptOneBlockLen) {
Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
dn.getFSDataset().getBlockReports(bpid);
// Send block report
StorageBlockReport[] reports =
new StorageBlockReport[perVolumeBlockLists.size()];
boolean corruptedGs = false;
boolean corruptedLen = false;
int reportIndex = 0;
for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
DatanodeStorage dnStorage = kvPair.getKey();
BlockListAsLongs blockList = kvPair.getValue();
// Walk the list of blocks until we find one each to corrupt the
// generation stamp and length, if so requested.
BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
for (BlockReportReplica block : blockList) {
if (corruptOneBlockGs && !corruptedGs) {
long gsOld = block.getGenerationStamp();
long gsNew;
do {
gsNew = rand.nextInt();
} while (gsNew == gsOld);
block.setGenerationStamp(gsNew);
LOG.info("Corrupted the GS for block ID " + block);
corruptedGs = true;
} else if (corruptOneBlockLen && !corruptedLen) {
long lenOld = block.getNumBytes();
long lenNew;
do {
lenNew = rand.nextInt((int)lenOld - 1);
} while (lenNew == lenOld);
block.setNumBytes(lenNew);
LOG.info("Corrupted the length for block ID " + block);
corruptedLen = true;
}
builder.add(new BlockReportReplica(block));
}
reports[reportIndex++] =
new StorageBlockReport(dnStorage, builder.build());
}
return reports;
}
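// Note: at most one block gets a corrupted generation stamp and at most one
// gets a corrupted length per invocation, no matter how many storages the
// reports span.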
/**
* Utility routine to send block reports to the NN, either in a single call
* or reporting one storage per call.
*
* @throws IOException
*/
protected abstract void sendBlockReports(DatanodeRegistration dnR, String poolId,
StorageBlockReport[] reports) throws IOException;
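// A minimal sketch of one possible override (illustrative only; the
// BlockReportContext constructor arguments below are assumed, not taken
// from a concrete subclass). It sends all storages in a single RPC:
//
//   @Override
//   protected void sendBlockReports(DatanodeRegistration dnR, String poolId,
//       StorageBlockReport[] reports) throws IOException {
//     cluster.getNameNodeRpc().blockReport(dnR, poolId, reports,
//         new BlockReportContext(1, 0, System.nanoTime()));
//   }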
/**
 * Test writes a file, verifies and closes it. Then the lengths of the
 * blocks are corrupted and a block report is forced.
 * The modification of the blocks' lengths has to be ignored by the NN.
*
* @throws java.io.IOException on an error
*/
@Test(timeout=300000)
public void blockReport_01() throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
ArrayList<Block> blocks = prepareForRide(filePath, METHOD_NAME, FILE_SIZE);
if(LOG.isDebugEnabled()) {
LOG.debug("Number of blocks allocated " + blocks.size());
}
long[] oldLengths = new long[blocks.size()];
int tempLen;
for (int i = 0; i < blocks.size(); i++) {
Block b = blocks.get(i);
if(LOG.isDebugEnabled()) {
LOG.debug("Block " + b.getBlockName() + " before\t" + "Size " +
b.getNumBytes());
}
oldLengths[i] = b.getNumBytes();
if(LOG.isDebugEnabled()) {
LOG.debug("Setting new length");
}
tempLen = rand.nextInt(BLOCK_SIZE);
b.set(b.getBlockId(), tempLen, b.getGenerationStamp());
if(LOG.isDebugEnabled()) {
LOG.debug("Block " + b.getBlockName() + " after\t " + "Size " +
b.getNumBytes());
}
}
// all blocks belong to the same file, hence same BP
DataNode dn = cluster.getDataNodes().get(DN_N0);
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
sendBlockReports(dnR, poolId, reports);
List<LocatedBlock> blocksAfterReport =
DFSTestUtil.getAllBlocks(fs.open(filePath));
if(LOG.isDebugEnabled()) {
LOG.debug("After mods: Number of blocks allocated " +
blocksAfterReport.size());
}
for (int i = 0; i < blocksAfterReport.size(); i++) {
ExtendedBlock b = blocksAfterReport.get(i).getBlock();
assertEquals("Length of " + i + "th block is incorrect",
oldLengths[i], b.getNumBytes());
}
}
/**
 * Test writes a file, verifies and closes it. Then a couple of random
 * blocks are removed and a block report is forced; the FSNamesystem is
 * pushed to recompute the required DN activities such as replication.
 * The number of missing and under-replicated blocks should be the same
 * in the case of a single-DN cluster.
*
* @throws IOException in case of errors
*/
@Test(timeout=300000)
public void blockReport_02() throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
LOG.info("Running test " + METHOD_NAME);
Path filePath = new Path("/" + METHOD_NAME + ".dat");
DFSTestUtil.createFile(fs, filePath,
FILE_SIZE, REPL_FACTOR, rand.nextLong());
// Mess around with the newly created blocks and delete some
File dataDir = new File(cluster.getDataDirectory());
assertTrue(dataDir.isDirectory());
List<ExtendedBlock> blocks2Remove = new ArrayList<ExtendedBlock>();
List<Integer> removedIndex = new ArrayList<Integer>();
List<LocatedBlock> lBlocks =
cluster.getNameNodeRpc().getBlockLocations(
filePath.toString(), FILE_START,
FILE_SIZE).getLocatedBlocks();
while (removedIndex.size() != 2) {
int newRemoveIndex = rand.nextInt(lBlocks.size());
if (!removedIndex.contains(newRemoveIndex))
removedIndex.add(newRemoveIndex);
}
for (Integer aRemovedIndex : removedIndex) {
blocks2Remove.add(lBlocks.get(aRemovedIndex).getBlock());
}
if(LOG.isDebugEnabled()) {
LOG.debug("Number of blocks allocated " + lBlocks.size());
}
final DataNode dn0 = cluster.getDataNodes().get(DN_N0);
for (ExtendedBlock b : blocks2Remove) {
if(LOG.isDebugEnabled()) {
LOG.debug("Removing the block " + b.getBlockName());
}
for (File f : findAllFiles(dataDir,
new MyFileFilter(b.getBlockName(), true))) {
DataNodeTestUtils.getFSDataset(dn0).unfinalizeBlock(b);
if (!f.delete()) {
LOG.warn("Couldn't delete " + b.getBlockName());
} else {
LOG.debug("Deleted file " + f.toString());
}
}
}
waitTil(TimeUnit.SECONDS.toMillis(DN_RESCAN_EXTRA_WAIT));
// all blocks belong to the same file, hence same BP
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn0.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn0, poolId, false, false);
sendBlockReports(dnR, poolId, reports);
BlockManagerTestUtil.getComputedDatanodeWork(cluster.getNamesystem()
.getBlockManager());
printStats();
assertEquals("Wrong number of MissingBlocks is found",
blocks2Remove.size(), cluster.getNamesystem().getMissingBlocksCount());
assertEquals("Wrong number of UnderReplicatedBlocks is found",
blocks2Remove.size(), cluster.getNamesystem().getUnderReplicatedBlocks());
}
/**
* Test writes a file and closes it.
 * A block report is generated with a bad GS for a single block.
 * The block report is forced and the number of corrupt blocks is checked.
*
* @throws IOException in case of an error
*/
@Test(timeout=300000)
public void blockReport_03() throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
writeFile(METHOD_NAME, FILE_SIZE, filePath);
// all blocks belong to the same file, hence same BP
DataNode dn = cluster.getDataNodes().get(DN_N0);
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
sendBlockReports(dnR, poolId, reports);
printStats();
assertThat("Wrong number of corrupt blocks",
cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
assertThat("Wrong number of PendingDeletion blocks",
cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
}
/**
* Test writes a file and closes it.
 * A block report is generated with an extra block.
 * The block report is forced and the number of pending-deletion blocks
 * is checked.
*
* @throws IOException in case of an error
*/
@Test(timeout=300000)
public void blockReport_04() throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
DFSTestUtil.createFile(fs, filePath,
FILE_SIZE, REPL_FACTOR, rand.nextLong());
DataNode dn = cluster.getDataNodes().get(DN_N0);
// all blocks belong to the same file, hence same BP
String poolId = cluster.getNamesystem().getBlockPoolId();
// Create a bogus new block which will not be present on the namenode.
ExtendedBlock b = new ExtendedBlock(
poolId, rand.nextLong(), 1024L, rand.nextLong());
dn.getFSDataset().createRbw(StorageType.DEFAULT, b, false);
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
sendBlockReports(dnR, poolId, reports);
printStats();
assertThat("Wrong number of corrupt blocks",
cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L));
assertThat("Wrong number of PendingDeletion blocks",
cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
}
/**
* Test creates a file and closes it.
* The second datanode is started in the cluster.
 * As soon as the replication process is completed, the test forces a
 * block report and checks that no under-replicated blocks are left.
*
* @throws IOException in case of an error
*/
@Test(timeout=300000)
public void blockReport_06() throws Exception {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
final int DN_N1 = DN_N0 + 1;
writeFile(METHOD_NAME, FILE_SIZE, filePath);
startDNandWait(filePath, true);
// all blocks belong to the same file, hence same BP
DataNode dn = cluster.getDataNodes().get(DN_N1);
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
sendBlockReports(dnR, poolId, reports);
printStats();
assertEquals("Wrong number of PendingReplication Blocks",
0, cluster.getNamesystem().getUnderReplicatedBlocks());
}
/**
 * Similar to blockReport_03() but works with two DNs.
 * Test writes a file and closes it.
 * The second datanode is started in the cluster.
 * As soon as the replication process is completed, the test finds a block
 * from the second DN and sets its GS to be less than the original one.
 * This is the markBlockAsCorrupt case 3, so we expect one pending deletion.
 * A block report is forced and the number of corrupt blocks is checked.
 * Another block is chosen and its length is set to less than the original.
 * A check for another corrupt block is performed after yet another
 * block report.
*
* @throws IOException in case of an error
*/
@Test(timeout=300000)
public void blockReport_07() throws Exception {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
final int DN_N1 = DN_N0 + 1;
// write file and start second node to be "older" than the original
writeFile(METHOD_NAME, FILE_SIZE, filePath);
startDNandWait(filePath, true);
// all blocks belong to the same file, hence same BP
DataNode dn = cluster.getDataNodes().get(DN_N1);
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
sendBlockReports(dnR, poolId, reports);
printStats();
assertThat("Wrong number of corrupt blocks",
cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L));
assertThat("Wrong number of PendingDeletion blocks",
cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
assertThat("Wrong number of PendingReplication blocks",
cluster.getNamesystem().getPendingReplicationBlocks(), is(0L));
reports = getBlockReports(dn, poolId, false, true);
sendBlockReports(dnR, poolId, reports);
printStats();
assertThat("Wrong number of corrupt blocks",
cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
assertThat("Wrong number of PendingDeletion blocks",
cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
assertThat("Wrong number of PendingReplication blocks",
cluster.getNamesystem().getPendingReplicationBlocks(), is(0L));
printStats();
}
/**
 * The test sets the configuration parameters for a large block size and
 * restarts the single-node cluster.
 * Then it writes a file larger than the block size and closes it.
 * The second datanode is started in the cluster.
 * As soon as the replication process has started and at least one TEMPORARY
 * replica is found, the test forces a block report and checks that the
 * TEMPORARY replica is not reported in it.
 * Finally, the configuration is restored to its original state.
*
* @throws IOException in case of an error
*/
@Test(timeout=300000)
public void blockReport_08() throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
final int DN_N1 = DN_N0 + 1;
final int bytesChkSum = 1024 * 1000;
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, bytesChkSum);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 6 * bytesChkSum);
shutDownCluster();
startUpCluster();
try {
ArrayList<Block> blocks =
writeFile(METHOD_NAME, 12 * bytesChkSum, filePath);
Block bl = findBlock(filePath, 12 * bytesChkSum);
BlockChecker bc = new BlockChecker(filePath);
bc.start();
waitForTempReplica(bl, DN_N1);
// all blocks belong to the same file, hence same BP
DataNode dn = cluster.getDataNodes().get(DN_N1);
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
sendBlockReports(dnR, poolId, reports);
printStats();
assertEquals("Wrong number of PendingReplication blocks",
blocks.size(), cluster.getNamesystem().getPendingReplicationBlocks());
try {
bc.join();
} catch (InterruptedException e) { }
} finally {
resetConfiguration(); // return the initial state of the configuration
}
}
// Similar to blockReport_08 but corrupts the GS and length of the
// TEMPORARY replica's block. Expect the same behaviour: the NN should
// simply ignore this block.
@Test(timeout=300000)
public void blockReport_09() throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
final int DN_N1 = DN_N0 + 1;
final int bytesChkSum = 1024 * 1000;
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, bytesChkSum);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 6 * bytesChkSum);
shutDownCluster();
startUpCluster();
// write file and start second node to be "older" than the original
try {
writeFile(METHOD_NAME, 12 * bytesChkSum, filePath);
Block bl = findBlock(filePath, 12 * bytesChkSum);
BlockChecker bc = new BlockChecker(filePath);
bc.start();
waitForTempReplica(bl, DN_N1);
// all blocks belong to the same file, hence same BP
DataNode dn = cluster.getDataNodes().get(DN_N1);
String poolId = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports = getBlockReports(dn, poolId, true, true);
sendBlockReports(dnR, poolId, reports);
printStats();
assertEquals("Wrong number of PendingReplication blocks",
2, cluster.getNamesystem().getPendingReplicationBlocks());
try {
bc.join();
} catch (InterruptedException e) {}
} finally {
resetConfiguration(); // return the initial state of the configuration
}
}
/**
* Test for the case where one of the DNs in the pipeline is in the
* process of doing a block report exactly when the block is closed.
* In this case, the block report becomes delayed until after the
* block is marked completed on the NN, and hence it reports an RBW
* replica for a COMPLETE block. Such a report should not be marked
* corrupt.
* This is a regression test for HDFS-2791.
*/
@Test(timeout=300000)
public void testOneReplicaRbwReportArrivesAfterBlockCompleted() throws Exception {
final CountDownLatch brFinished = new CountDownLatch(1);
DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG) {
@Override
protected Object passThrough(InvocationOnMock invocation)
throws Throwable {
try {
return super.passThrough(invocation);
} finally {
// inform the test that our block report went through.
brFinished.countDown();
}
}
};
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
// Start a second DN for this test -- we're checking
// what happens when one of the DNs is slowed for some reason.
REPL_FACTOR = 2;
startDNandWait(null, false);
NameNode nn = cluster.getNameNode();
FSDataOutputStream out = fs.create(filePath, REPL_FACTOR);
try {
AppendTestUtil.write(out, 0, 10);
out.hflush();
// Set up a spy so that we can delay the block report coming
// from this node.
DataNode dn = cluster.getDataNodes().get(0);
DatanodeProtocolClientSideTranslatorPB spy =
DataNodeTestUtils.spyOnBposToNN(dn, nn);
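      // spyOnBposToNN wraps the BPOfferService's NN proxy in a Mockito spy,
      // letting the delayer below hold the block-report RPC before it
      // reaches the NN.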
Mockito.doAnswer(delayer)
.when(spy).blockReport(
Mockito.<DatanodeRegistration>anyObject(),
Mockito.anyString(),
Mockito.<StorageBlockReport[]>anyObject(),
Mockito.<BlockReportContext>anyObject());
// Force a block report to be generated. The block report will have
// an RBW replica in it. Wait for the RPC to be sent, but block
// it before it gets to the NN.
dn.scheduleAllBlockReport(0);
delayer.waitForCall();
} finally {
IOUtils.closeStream(out);
}
// Now that the stream is closed, the NN will have the block in COMPLETE
// state.
delayer.proceed();
brFinished.await();
// Verify that no replicas are marked corrupt, and that the
// file is still readable.
BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
assertEquals(0, nn.getNamesystem().getCorruptReplicaBlocks());
DFSTestUtil.readFile(fs, filePath);
// Ensure that the file is readable even from the DN that we futzed with.
cluster.stopDataNode(1);
DFSTestUtil.readFile(fs, filePath);
}
private void waitForTempReplica(Block bl, int DN_N1) throws IOException {
final boolean tooLongWait = false;
final int TIMEOUT = 40000;
if(LOG.isDebugEnabled()) {
LOG.debug("Wait for datanode " + DN_N1 + " to appear");
}
while (cluster.getDataNodes().size() <= DN_N1) {
waitTil(20);
}
if(LOG.isDebugEnabled()) {
LOG.debug("Total number of DNs " + cluster.getDataNodes().size());
}
cluster.waitActive();
// Look on the specified DN for the replica of the block from the 1st DN
final DataNode dn1 = cluster.getDataNodes().get(DN_N1);
String bpid = cluster.getNamesystem().getBlockPoolId();
Replica r = DataNodeTestUtils.fetchReplicaInfo(dn1, bpid, bl.getBlockId());
long start = Time.monotonicNow();
int count = 0;
while (r == null) {
waitTil(5);
r = DataNodeTestUtils.fetchReplicaInfo(dn1, bpid, bl.getBlockId());
      long waitingPeriod = Time.monotonicNow() - start;
      if (count++ % 100 == 0)
        if(LOG.isDebugEnabled()) {
          LOG.debug("Has been waiting for " + waitingPeriod + " ms.");
        }
      if (waitingPeriod > TIMEOUT)
assertTrue("Was waiting too long to get ReplicaInfo from a datanode",
tooLongWait);
}
HdfsServerConstants.ReplicaState state = r.getState();
if(LOG.isDebugEnabled()) {
LOG.debug("Replica state before the loop " + state.getValue());
}
start = Time.monotonicNow();
while (state != HdfsServerConstants.ReplicaState.TEMPORARY) {
waitTil(5);
state = r.getState();
if(LOG.isDebugEnabled()) {
LOG.debug("Keep waiting for " + bl.getBlockName() +
" is in state " + state.getValue());
}
if (Time.monotonicNow() - start > TIMEOUT)
assertTrue("Was waiting too long for a replica to become TEMPORARY",
tooLongWait);
}
if(LOG.isDebugEnabled()) {
LOG.debug("Replica state after the loop " + state.getValue());
}
}
// Helper methods from here below...
// Write a file with replication factor 2 and return its blocks.
private ArrayList<Block> writeFile(final String METHOD_NAME,
final long fileSize,
Path filePath) {
ArrayList<Block> blocks = null;
try {
REPL_FACTOR = 2;
blocks = prepareForRide(filePath, METHOD_NAME, fileSize);
} catch (IOException e) {
if(LOG.isDebugEnabled()) {
LOG.debug("Caught exception ", e);
}
}
return blocks;
}
private void startDNandWait(Path filePath, boolean waitReplicas)
throws IOException, InterruptedException, TimeoutException {
if (LOG.isDebugEnabled()) {
LOG.debug("Before next DN start: " + cluster.getDataNodes().size());
}
cluster.startDataNodes(conf, 1, true, null, null);
cluster.waitClusterUp();
ArrayList<DataNode> datanodes = cluster.getDataNodes();
assertEquals(2, datanodes.size());
if (LOG.isDebugEnabled()) {
int lastDn = datanodes.size() - 1;
LOG.debug("New datanode "
+ cluster.getDataNodes().get(lastDn).getDisplayName()
+ " has been started");
}
if (waitReplicas) {
DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);
}
}
private ArrayList<Block> prepareForRide(final Path filePath,
final String METHOD_NAME,
long fileSize) throws IOException {
LOG.info("Running test " + METHOD_NAME);
DFSTestUtil.createFile(fs, filePath, fileSize,
REPL_FACTOR, rand.nextLong());
return locatedToBlocks(cluster.getNameNodeRpc()
.getBlockLocations(filePath.toString(), FILE_START,
fileSize).getLocatedBlocks(), null);
}
private void printStats() {
BlockManagerTestUtil.updateState(cluster.getNamesystem().getBlockManager());
if(LOG.isDebugEnabled()) {
LOG.debug("Missing " + cluster.getNamesystem().getMissingBlocksCount());
LOG.debug("Corrupted " + cluster.getNamesystem().getCorruptReplicaBlocks());
LOG.debug("Under-replicated " + cluster.getNamesystem().
getUnderReplicatedBlocks());
LOG.debug("Pending delete " + cluster.getNamesystem().
getPendingDeletionBlocks());
LOG.debug("Pending replications " + cluster.getNamesystem().
getPendingReplicationBlocks());
LOG.debug("Excess " + cluster.getNamesystem().getExcessBlocks());
LOG.debug("Total " + cluster.getNamesystem().getBlocksTotal());
}
}
private ArrayList<Block> locatedToBlocks(final List<LocatedBlock> locatedBlks,
List<Integer> positionsToRemove) {
ArrayList<Block> newList = new ArrayList<Block>();
for (int i = 0; i < locatedBlks.size(); i++) {
if (positionsToRemove != null && positionsToRemove.contains(i)) {
if(LOG.isDebugEnabled()) {
LOG.debug(i + " block to be omitted");
}
continue;
}
newList.add(new Block(locatedBlks.get(i).getBlock().getLocalBlock()));
}
return newList;
}
private void waitTil(long waitPeriod) {
try { // Wait until the next re-scan
Thread.sleep(waitPeriod);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
private List<File> findAllFiles(File top, FilenameFilter mask) {
if (top == null) return null;
ArrayList<File> ret = new ArrayList<File>();
for (File f : top.listFiles()) {
if (f.isDirectory())
ret.addAll(findAllFiles(f, mask));
else if (mask.accept(f, f.getName()))
ret.add(f);
}
return ret;
}
private class MyFileFilter implements FilenameFilter {
private String nameToAccept = "";
private boolean all = false;
public MyFileFilter(String nameToAccept, boolean all) {
if (nameToAccept == null)
throw new IllegalArgumentException("Argument is not supposed to be null");
this.nameToAccept = nameToAccept;
this.all = all;
}
@Override
public boolean accept(File file, String s) {
if (all)
return s != null && s.startsWith(nameToAccept);
else
return s != null && s.equals(nameToAccept);
}
}
private static void initLoggers() {
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
GenericTestUtils.setLogLevel(BlockReportTestBase.LOG, Level.ALL);
}
private Block findBlock(Path path, long size) throws IOException {
Block ret;
List<LocatedBlock> lbs =
cluster.getNameNodeRpc()
.getBlockLocations(path.toString(),
FILE_START, size).getLocatedBlocks();
LocatedBlock lb = lbs.get(lbs.size() - 1);
// Get block from the first DN
ret = cluster.getDataNodes().get(DN_N0).
data.getStoredBlock(lb.getBlock()
.getBlockPoolId(), lb.getBlock().getBlockId());
return ret;
}
private class BlockChecker extends Thread {
final Path filePath;
public BlockChecker(final Path filePath) {
this.filePath = filePath;
}
@Override
public void run() {
try {
startDNandWait(filePath, true);
} catch (Exception e) {
e.printStackTrace();
Assert.fail("Failed to start BlockChecker: " + e);
}
}
}
}
| 32,134 | 36.279582 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.*;
import java.io.File;
import java.io.IOException;
import java.util.Random;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.Builder;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.TestRollingUpgrade;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
/**
* Ensure that the DataNode correctly handles rolling upgrade
* finalize and rollback.
*/
public class TestDataNodeRollingUpgrade {
private static final Log LOG = LogFactory.getLog(TestDataNodeRollingUpgrade.class);
private static final short REPL_FACTOR = 1;
private static final int BLOCK_SIZE = 1024 * 1024;
private static final long FILE_SIZE = BLOCK_SIZE;
private static final long SEED = 0x1BADF00DL;
Configuration conf;
MiniDFSCluster cluster = null;
DistributedFileSystem fs = null;
DataNode dn0 = null;
NameNode nn = null;
String blockPoolId = null;
private void startCluster() throws IOException {
conf = new HdfsConfiguration();
conf.setInt("dfs.blocksize", 1024*1024);
cluster = new Builder(conf).numDataNodes(REPL_FACTOR).build();
cluster.waitActive();
fs = cluster.getFileSystem();
nn = cluster.getNameNode(0);
assertNotNull(nn);
dn0 = cluster.getDataNodes().get(0);
assertNotNull(dn0);
blockPoolId = cluster.getNameNode(0).getNamesystem().getBlockPoolId();
}
private void shutdownCluster() {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
fs = null;
nn = null;
dn0 = null;
blockPoolId = null;
}
private void triggerHeartBeats() throws Exception {
// Sleep briefly so that DN learns of the rolling upgrade
// state and other states from heartbeats.
cluster.triggerHeartbeats();
Thread.sleep(5000);
}
/** Test assumes that the file has a single block */
private File getBlockForFile(Path path, boolean exists) throws IOException {
LocatedBlocks blocks = nn.getRpcServer().getBlockLocations(path.toString(),
0, Long.MAX_VALUE);
assertEquals("The test helper functions assume that each file has a single block",
1, blocks.getLocatedBlocks().size());
ExtendedBlock block = blocks.getLocatedBlocks().get(0).getBlock();
BlockLocalPathInfo bInfo = dn0.getFSDataset().getBlockLocalPathInfo(block);
File blockFile = new File(bInfo.getBlockPath());
assertEquals(exists, blockFile.exists());
return blockFile;
}
private File getTrashFileForBlock(File blockFile, boolean exists) {
File trashFile = new File(
dn0.getStorage().getTrashDirectoryForBlockFile(blockPoolId, blockFile));
assertEquals(exists, trashFile.exists());
return trashFile;
}
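// For illustration (paths assumed, not captured from a real run): a block
// file such as
//   .../current/BP-1-127.0.0.1-1/current/finalized/blk_100
// maps to a trash location such as
//   .../current/BP-1-127.0.0.1-1/trash/finalized/blk_100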
/**
* Ensures that the blocks belonging to the deleted file are in trash
*/
private void deleteAndEnsureInTrash(Path pathToDelete,
File blockFile, File trashFile) throws Exception {
assertTrue(blockFile.exists());
assertFalse(trashFile.exists());
// Now delete the file and ensure the corresponding block in trash
LOG.info("Deleting file " + pathToDelete + " during rolling upgrade");
fs.delete(pathToDelete, false);
assertFalse(fs.exists(pathToDelete));
triggerHeartBeats();
assertTrue(trashFile.exists());
assertFalse(blockFile.exists());
}
private boolean isTrashRootPresent() {
// The trash root exists iff trash is currently enabled for the block pool
BlockPoolSliceStorage bps = dn0.getStorage().getBPStorage(blockPoolId);
return bps.trashEnabled();
}
/**
* Ensures that the blocks from trash are restored
*/
private void ensureTrashRestored(File blockFile, File trashFile)
throws Exception {
assertTrue(blockFile.exists());
assertFalse(trashFile.exists());
assertFalse(isTrashRootPresent());
}
private boolean isBlockFileInPrevious(File blockFile) {
Pattern blockFilePattern = Pattern.compile(String.format(
"^(.*%1$scurrent%1$s.*%1$s)(current)(%1$s.*)$",
Pattern.quote(File.separator)));
Matcher matcher = blockFilePattern.matcher(blockFile.toString());
String previousFileName = matcher.replaceFirst("$1" + "previous" + "$3");
return ((new File(previousFileName)).exists());
}
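// Worked example (path assumed for illustration): the regex rewrites the
// second "current" component under the block pool slice, so
//   .../data1/current/BP-1/current/finalized/blk_1
// maps to
//   .../data1/current/BP-1/previous/finalized/blk_1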
private void startRollingUpgrade() throws Exception {
LOG.info("Starting rolling upgrade");
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
final DFSAdmin dfsadmin = new DFSAdmin(conf);
TestRollingUpgrade.runCmd(dfsadmin, true, "-rollingUpgrade", "prepare");
triggerHeartBeats();
// Ensure datanode rolling upgrade is started
assertTrue(dn0.getFSDataset().trashEnabled(blockPoolId));
}
private void finalizeRollingUpgrade() throws Exception {
LOG.info("Finalizing rolling upgrade");
final DFSAdmin dfsadmin = new DFSAdmin(conf);
TestRollingUpgrade.runCmd(dfsadmin, true, "-rollingUpgrade", "finalize");
triggerHeartBeats();
// Ensure datanode rolling upgrade is finalized
assertFalse(dn0.getFSDataset().trashEnabled(blockPoolId));
BlockPoolSliceStorage bps = dn0.getStorage().getBPStorage(blockPoolId);
assertFalse(bps.trashEnabled());
}
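// For reference, the admin-side equivalents of the helpers above are:
//   hdfs dfsadmin -rollingUpgrade prepare
//   hdfs dfsadmin -rollingUpgrade finalize
// and rollback is performed by restarting the NN with
// "-rollingUpgrade rollback" and the DN with "-rollback".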
private void rollbackRollingUpgrade() throws Exception {
// Shutdown datanodes and namenodes
// Restart the namenode with rolling upgrade rollback
LOG.info("Starting rollback of the rolling upgrade");
MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0);
dnprop.setDnArgs("-rollback");
cluster.shutdownNameNodes();
cluster.restartNameNode("-rollingupgrade", "rollback");
cluster.restartDataNode(dnprop);
cluster.waitActive();
nn = cluster.getNameNode(0);
dn0 = cluster.getDataNodes().get(0);
triggerHeartBeats();
LOG.info("The cluster is active after rollback");
}
@Test (timeout=600000)
public void testDatanodeRollingUpgradeWithFinalize() throws Exception {
try {
startCluster();
rollingUpgradeAndFinalize();
// Do it again
rollingUpgradeAndFinalize();
} finally {
shutdownCluster();
}
}
@Test(timeout = 600000)
public void testDatanodeRUwithRegularUpgrade() throws Exception {
try {
startCluster();
rollingUpgradeAndFinalize();
DataNodeProperties dn = cluster.stopDataNode(0);
cluster.restartNameNode(0, true, "-upgrade");
cluster.restartDataNode(dn, true);
cluster.waitActive();
fs = cluster.getFileSystem(0);
Path testFile3 = new Path("/" + GenericTestUtils.getMethodName()
+ ".03.dat");
DFSTestUtil.createFile(fs, testFile3, FILE_SIZE, REPL_FACTOR, SEED);
cluster.getFileSystem().finalizeUpgrade();
} finally {
shutdownCluster();
}
}
private void rollingUpgradeAndFinalize() throws IOException, Exception {
// Create files in DFS.
Path testFile1 = new Path("/" + GenericTestUtils.getMethodName() + ".01.dat");
Path testFile2 = new Path("/" + GenericTestUtils.getMethodName() + ".02.dat");
DFSTestUtil.createFile(fs, testFile1, FILE_SIZE, REPL_FACTOR, SEED);
DFSTestUtil.createFile(fs, testFile2, FILE_SIZE, REPL_FACTOR, SEED);
startRollingUpgrade();
File blockFile = getBlockForFile(testFile2, true);
File trashFile = getTrashFileForBlock(blockFile, false);
cluster.triggerBlockReports();
deleteAndEnsureInTrash(testFile2, blockFile, trashFile);
finalizeRollingUpgrade();
    // Ensure that the deleted file testFile2 stays deleted after finalize
    assertFalse(isTrashRootPresent());
    assertFalse(fs.exists(testFile2));
    assertTrue(fs.exists(testFile1));
}
@Test (timeout=600000)
public void testDatanodeRollingUpgradeWithRollback() throws Exception {
try {
startCluster();
// Create files in DFS.
Path testFile1 = new Path("/" + GenericTestUtils.getMethodName() + ".01.dat");
DFSTestUtil.createFile(fs, testFile1, FILE_SIZE, REPL_FACTOR, SEED);
String fileContents1 = DFSTestUtil.readFile(fs, testFile1);
startRollingUpgrade();
File blockFile = getBlockForFile(testFile1, true);
File trashFile = getTrashFileForBlock(blockFile, false);
deleteAndEnsureInTrash(testFile1, blockFile, trashFile);
// Now perform a rollback to restore DFS to the pre-rollback state.
rollbackRollingUpgrade();
// Ensure that block was restored from trash
ensureTrashRestored(blockFile, trashFile);
// Ensure that files exist and restored file contents are the same.
assertTrue(fs.exists(testFile1));
String fileContents2 = DFSTestUtil.readFile(fs, testFile1);
assertThat(fileContents1, is(fileContents2));
} finally {
shutdownCluster();
}
}
@Test (timeout=600000)
// Test DatanodeXceiver has correct peer-dataxceiver pairs for sending OOB message
public void testDatanodePeersXceiver() throws Exception {
try {
startCluster();
// Create files in DFS.
String testFile1 = "/" + GenericTestUtils.getMethodName() + ".01.dat";
String testFile2 = "/" + GenericTestUtils.getMethodName() + ".02.dat";
String testFile3 = "/" + GenericTestUtils.getMethodName() + ".03.dat";
DFSClient client1 = new DFSClient(NameNode.getAddress(conf), conf);
DFSClient client2 = new DFSClient(NameNode.getAddress(conf), conf);
DFSClient client3 = new DFSClient(NameNode.getAddress(conf), conf);
DFSOutputStream s1 = (DFSOutputStream) client1.create(testFile1, true);
DFSOutputStream s2 = (DFSOutputStream) client2.create(testFile2, true);
DFSOutputStream s3 = (DFSOutputStream) client3.create(testFile3, true);
byte[] toWrite = new byte[1024*1024*8];
Random rb = new Random(1111);
rb.nextBytes(toWrite);
s1.write(toWrite, 0, 1024*1024*8);
s1.flush();
s2.write(toWrite, 0, 1024*1024*8);
s2.flush();
s3.write(toWrite, 0, 1024*1024*8);
s3.flush();
      // Each open stream should have a matching peer/xceiver pair on the DN.
      assertEquals(dn0.getXferServer().getNumPeers(),
          dn0.getXferServer().getNumPeersXceiver());
s1.close();
s2.close();
s3.close();
      assertEquals(dn0.getXferServer().getNumPeers(),
          dn0.getXferServer().getNumPeersXceiver());
client1.close();
client2.close();
client3.close();
} finally {
shutdownCluster();
}
}
/**
* Support for layout version change with rolling upgrade was
* added by HDFS-6800 and HDFS-6981.
*/
@Test(timeout=300000)
public void testWithLayoutChangeAndFinalize() throws Exception {
final long seed = 0x600DF00D;
try {
startCluster();
Path[] paths = new Path[3];
File[] blockFiles = new File[3];
// Create two files in DFS.
for (int i = 0; i < 2; ++i) {
paths[i] = new Path("/" + GenericTestUtils.getMethodName() + "." + i + ".dat");
DFSTestUtil.createFile(fs, paths[i], BLOCK_SIZE, (short) 2, seed);
}
startRollingUpgrade();
// Delete the first file. The DN will save its block files in trash.
blockFiles[0] = getBlockForFile(paths[0], true);
File trashFile0 = getTrashFileForBlock(blockFiles[0], false);
deleteAndEnsureInTrash(paths[0], blockFiles[0], trashFile0);
// Restart the DN with a new layout version to trigger layout upgrade.
LOG.info("Shutting down the Datanode");
MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0);
DFSTestUtil.addDataNodeLayoutVersion(
DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1,
"Test Layout for TestDataNodeRollingUpgrade");
LOG.info("Restarting the DataNode");
cluster.restartDataNode(dnprop, true);
cluster.waitActive();
dn0 = cluster.getDataNodes().get(0);
LOG.info("The DN has been restarted");
assertFalse(trashFile0.exists());
assertFalse(dn0.getStorage().getBPStorage(blockPoolId).isTrashAllowed(blockFiles[0]));
// Ensure that the block file for the first file was moved from 'trash' to 'previous'.
assertTrue(isBlockFileInPrevious(blockFiles[0]));
assertFalse(isTrashRootPresent());
// Delete the second file. Ensure that its block file is in previous.
blockFiles[1] = getBlockForFile(paths[1], true);
fs.delete(paths[1], false);
assertTrue(isBlockFileInPrevious(blockFiles[1]));
assertFalse(isTrashRootPresent());
// Finalize and ensure that neither block file exists in trash or previous.
finalizeRollingUpgrade();
assertFalse(isTrashRootPresent());
assertFalse(isBlockFileInPrevious(blockFiles[0]));
assertFalse(isBlockFileInPrevious(blockFiles[1]));
} finally {
shutdownCluster();
}
}
/**
* Support for layout version change with rolling upgrade was
* added by HDFS-6800 and HDFS-6981.
*/
@Test(timeout=300000)
public void testWithLayoutChangeAndRollback() throws Exception {
final long seed = 0x600DF00D;
try {
startCluster();
Path[] paths = new Path[3];
File[] blockFiles = new File[3];
// Create two files in DFS.
for (int i = 0; i < 2; ++i) {
paths[i] = new Path("/" + GenericTestUtils.getMethodName() + "." + i + ".dat");
DFSTestUtil.createFile(fs, paths[i], BLOCK_SIZE, (short) 1, seed);
}
startRollingUpgrade();
// Delete the first file. The DN will save its block files in trash.
blockFiles[0] = getBlockForFile(paths[0], true);
File trashFile0 = getTrashFileForBlock(blockFiles[0], false);
deleteAndEnsureInTrash(paths[0], blockFiles[0], trashFile0);
// Restart the DN with a new layout version to trigger layout upgrade.
LOG.info("Shutting down the Datanode");
MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0);
DFSTestUtil.addDataNodeLayoutVersion(
DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1,
"Test Layout for TestDataNodeRollingUpgrade");
LOG.info("Restarting the DataNode");
cluster.restartDataNode(dnprop, true);
cluster.waitActive();
dn0 = cluster.getDataNodes().get(0);
LOG.info("The DN has been restarted");
assertFalse(trashFile0.exists());
assertFalse(dn0.getStorage().getBPStorage(blockPoolId).isTrashAllowed(blockFiles[0]));
// Ensure that the block file for the first file was moved from 'trash' to 'previous'.
assertTrue(isBlockFileInPrevious(blockFiles[0]));
assertFalse(isTrashRootPresent());
// Delete the second file. Ensure that its block file is in previous.
blockFiles[1] = getBlockForFile(paths[1], true);
fs.delete(paths[1], false);
assertTrue(isBlockFileInPrevious(blockFiles[1]));
assertFalse(isTrashRootPresent());
// Create and delete a third file. Its block file should not be
// in either trash or previous after deletion.
paths[2] = new Path("/" + GenericTestUtils.getMethodName() + ".2.dat");
DFSTestUtil.createFile(fs, paths[2], BLOCK_SIZE, (short) 1, seed);
blockFiles[2] = getBlockForFile(paths[2], true);
fs.delete(paths[2], false);
assertFalse(isBlockFileInPrevious(blockFiles[2]));
assertFalse(isTrashRootPresent());
// Rollback and ensure that the first two file contents were restored.
rollbackRollingUpgrade();
for (int i = 0; i < 2; ++i) {
byte[] actual = DFSTestUtil.readFileBuffer(fs, paths[i]);
byte[] calculated = DFSTestUtil.calculateFileContentsFromSeed(seed, BLOCK_SIZE);
assertArrayEquals(calculated, actual);
}
// And none of the block files must be in previous or trash.
assertFalse(isTrashRootPresent());
for (int i = 0; i < 3; ++i) {
assertFalse(isBlockFileInPrevious(blockFiles[i]));
}
} finally {
shutdownCluster();
}
}
}
| 17,552 | 36.426439 | 92 |
java
|